Diffstat (limited to 'drivers')
66 files changed, 2399 insertions, 837 deletions
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 7c74d8cba44f..966aab902d19 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -52,9 +52,6 @@ static unsigned int num_devices = 1; static size_t huge_class_size; static const struct block_device_operations zram_devops; -#ifdef CONFIG_ZRAM_WRITEBACK -static const struct block_device_operations zram_wb_devops; -#endif static void zram_free_page(struct zram *zram, size_t index); static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, @@ -546,17 +543,6 @@ static ssize_t backing_dev_store(struct device *dev, zram->backing_dev = backing_dev; zram->bitmap = bitmap; zram->nr_pages = nr_pages; - /* - * With writeback feature, zram does asynchronous IO so it's no longer - * synchronous device so let's remove synchronous io flag. Othewise, - * upper layer(e.g., swap) could wait IO completion rather than - * (submit and return), which will cause system sluggish. - * Furthermore, when the IO function returns(e.g., swap_readpage), - * upper layer expects IO was done so it could deallocate the page - * freely but in fact, IO is going on so finally could cause - * use-after-free when the IO is really done. - */ - zram->disk->fops = &zram_wb_devops; up_write(&zram->init_lock); pr_info("setup backing device %s\n", file_name); @@ -1270,6 +1256,9 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, struct bio_vec bvec; zram_slot_unlock(zram, index); + /* A null bio means rw_page was used, we must fallback to bio */ + if (!bio) + return -EOPNOTSUPP; bvec.bv_page = page; bvec.bv_len = PAGE_SIZE; @@ -1856,15 +1845,6 @@ static const struct block_device_operations zram_devops = { .owner = THIS_MODULE }; -#ifdef CONFIG_ZRAM_WRITEBACK -static const struct block_device_operations zram_wb_devops = { - .open = zram_open, - .submit_bio = zram_submit_bio, - .swap_slot_free_notify = zram_slot_free_notify, - .owner = THIS_MODULE -}; -#endif - static DEVICE_ATTR_WO(compact); static DEVICE_ATTR_RW(disksize); static DEVICE_ATTR_RO(initstate); diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c index d429ba52a719..943ea67bf135 100644 --- a/drivers/clk/at91/clk-generated.c +++ b/drivers/clk/at91/clk-generated.c @@ -136,7 +136,6 @@ static int clk_generated_determine_rate(struct clk_hw *hw, { struct clk_generated *gck = to_clk_generated(hw); struct clk_hw *parent = NULL; - struct clk_rate_request req_parent = *req; long best_rate = -EINVAL; unsigned long min_rate, parent_rate; int best_diff = -1; @@ -192,7 +191,9 @@ static int clk_generated_determine_rate(struct clk_hw *hw, goto end; for (div = 1; div < GENERATED_MAX_DIV + 2; div++) { - req_parent.rate = req->rate * div; + struct clk_rate_request req_parent; + + clk_hw_forward_rate_request(hw, req, parent, &req_parent, req->rate * div); if (__clk_determine_rate(parent, &req_parent)) continue; clk_generated_best_diff(req, parent, req_parent.rate, div, diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c index 164e2959c7cf..b7cd1924de52 100644 --- a/drivers/clk/at91/clk-master.c +++ b/drivers/clk/at91/clk-master.c @@ -581,7 +581,6 @@ static int clk_sama7g5_master_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { struct clk_master *master = to_clk_master(hw); - struct clk_rate_request req_parent = *req; struct clk_hw *parent; long best_rate = LONG_MIN, best_diff = LONG_MIN; unsigned long parent_rate; @@ -618,11 +617,15 @@ static int 
clk_sama7g5_master_determine_rate(struct clk_hw *hw, goto end; for (div = 0; div < MASTER_PRES_MAX + 1; div++) { + struct clk_rate_request req_parent; + unsigned long req_rate; + if (div == MASTER_PRES_MAX) - req_parent.rate = req->rate * 3; + req_rate = req->rate * 3; else - req_parent.rate = req->rate << div; + req_rate = req->rate << div; + clk_hw_forward_rate_request(hw, req, parent, &req_parent, req_rate); if (__clk_determine_rate(parent, &req_parent)) continue; diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c index e14fa5ac734c..5104d4025484 100644 --- a/drivers/clk/at91/clk-peripheral.c +++ b/drivers/clk/at91/clk-peripheral.c @@ -269,7 +269,6 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw, { struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); struct clk_hw *parent = clk_hw_get_parent(hw); - struct clk_rate_request req_parent = *req; unsigned long parent_rate = clk_hw_get_rate(parent); unsigned long tmp_rate; long best_rate = LONG_MIN; @@ -302,8 +301,9 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw, goto end; for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) { - req_parent.rate = req->rate << shift; + struct clk_rate_request req_parent; + clk_hw_forward_rate_request(hw, req, parent, &req_parent, req->rate << shift); if (__clk_determine_rate(parent, &req_parent)) continue; diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c index b9c5f904f535..edfa94641bbf 100644 --- a/drivers/clk/clk-composite.c +++ b/drivers/clk/clk-composite.c @@ -85,10 +85,11 @@ static int clk_composite_determine_rate(struct clk_hw *hw, req->best_parent_hw = NULL; if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) { - struct clk_rate_request tmp_req = *req; + struct clk_rate_request tmp_req; parent = clk_hw_get_parent(mux_hw); + clk_hw_forward_rate_request(hw, req, parent, &tmp_req, req->rate); ret = clk_composite_determine_rate_for_parent(rate_hw, &tmp_req, parent, @@ -104,12 +105,13 @@ static int clk_composite_determine_rate(struct clk_hw *hw, } for (i = 0; i < clk_hw_get_num_parents(mux_hw); i++) { - struct clk_rate_request tmp_req = *req; + struct clk_rate_request tmp_req; parent = clk_hw_get_parent_by_index(mux_hw, i); if (!parent) continue; + clk_hw_forward_rate_request(hw, req, parent, &tmp_req, req->rate); ret = clk_composite_determine_rate_for_parent(rate_hw, &tmp_req, parent, diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index f6b2bf558486..a2c2b5203b0a 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -386,13 +386,13 @@ long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, const struct clk_div_table *table, u8 width, unsigned long flags) { - struct clk_rate_request req = { - .rate = rate, - .best_parent_rate = *prate, - .best_parent_hw = parent, - }; + struct clk_rate_request req; int ret; + clk_hw_init_rate_request(hw, &req, rate); + req.best_parent_rate = *prate; + req.best_parent_hw = parent; + ret = divider_determine_rate(hw, &req, table, width, flags); if (ret) return ret; @@ -408,13 +408,13 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, const struct clk_div_table *table, u8 width, unsigned long flags, unsigned int val) { - struct clk_rate_request req = { - .rate = rate, - .best_parent_rate = *prate, - .best_parent_hw = parent, - }; + struct clk_rate_request req; int ret; + clk_hw_init_rate_request(hw, &req, rate); + req.best_parent_rate = *prate; + req.best_parent_hw = parent; + ret 
= divider_ro_determine_rate(hw, &req, table, width, flags, val); if (ret) return ret; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index dd810bcd2700..c3c3f8c07258 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -536,6 +536,53 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now, return now <= rate && now > best; } +static void clk_core_init_rate_req(struct clk_core * const core, + struct clk_rate_request *req, + unsigned long rate); + +static int clk_core_round_rate_nolock(struct clk_core *core, + struct clk_rate_request *req); + +static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent) +{ + struct clk_core *tmp; + unsigned int i; + + /* Optimize for the case where the parent is already the parent. */ + if (core->parent == parent) + return true; + + for (i = 0; i < core->num_parents; i++) { + tmp = clk_core_get_parent_by_index(core, i); + if (!tmp) + continue; + + if (tmp == parent) + return true; + } + + return false; +} + +static void +clk_core_forward_rate_req(struct clk_core *core, + const struct clk_rate_request *old_req, + struct clk_core *parent, + struct clk_rate_request *req, + unsigned long parent_rate) +{ + if (WARN_ON(!clk_core_has_parent(core, parent))) + return; + + clk_core_init_rate_req(parent, req, parent_rate); + + if (req->min_rate < old_req->min_rate) + req->min_rate = old_req->min_rate; + + if (req->max_rate > old_req->max_rate) + req->max_rate = old_req->max_rate; +} + int clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req, unsigned long flags) @@ -543,14 +590,20 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_core *core = hw->core, *parent, *best_parent = NULL; int i, num_parents, ret; unsigned long best = 0; - struct clk_rate_request parent_req = *req; /* if NO_REPARENT flag set, pass through to current parent */ if (core->flags & CLK_SET_RATE_NO_REPARENT) { parent = core->parent; if (core->flags & CLK_SET_RATE_PARENT) { - ret = __clk_determine_rate(parent ? 
parent->hw : NULL, - &parent_req); + struct clk_rate_request parent_req; + + if (!parent) { + req->rate = 0; + return 0; + } + + clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate); + ret = clk_core_round_rate_nolock(parent, &parent_req); if (ret) return ret; @@ -567,23 +620,29 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw, /* find the parent that can provide the fastest rate <= rate */ num_parents = core->num_parents; for (i = 0; i < num_parents; i++) { + unsigned long parent_rate; + parent = clk_core_get_parent_by_index(core, i); if (!parent) continue; if (core->flags & CLK_SET_RATE_PARENT) { - parent_req = *req; - ret = __clk_determine_rate(parent->hw, &parent_req); + struct clk_rate_request parent_req; + + clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate); + ret = clk_core_round_rate_nolock(parent, &parent_req); if (ret) continue; + + parent_rate = parent_req.rate; } else { - parent_req.rate = clk_core_get_rate_nolock(parent); + parent_rate = clk_core_get_rate_nolock(parent); } - if (mux_is_better_rate(req->rate, parent_req.rate, + if (mux_is_better_rate(req->rate, parent_rate, best, flags)) { best_parent = parent; - best = parent_req.rate; + best = parent_rate; } } @@ -625,6 +684,22 @@ static void clk_core_get_boundaries(struct clk_core *core, *max_rate = min(*max_rate, clk_user->max_rate); } +/* + * clk_hw_get_rate_range() - returns the clock rate range for a hw clk + * @hw: the hw clk we want to get the range from + * @min_rate: pointer to the variable that will hold the minimum + * @max_rate: pointer to the variable that will hold the maximum + * + * Fills the @min_rate and @max_rate variables with the minimum and + * maximum that clock can reach. + */ +void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate, + unsigned long *max_rate) +{ + clk_core_get_boundaries(hw->core, min_rate, max_rate); +} +EXPORT_SYMBOL_GPL(clk_hw_get_rate_range); + static bool clk_core_check_boundaries(struct clk_core *core, unsigned long min_rate, unsigned long max_rate) @@ -1340,7 +1415,19 @@ static int clk_core_determine_round_nolock(struct clk_core *core, if (!core) return 0; - req->rate = clamp(req->rate, req->min_rate, req->max_rate); + /* + * Some clock providers hand-craft their clk_rate_requests and + * might not fill min_rate and max_rate. + * + * If it's the case, clamping the rate is equivalent to setting + * the rate to 0 which is bad. Skip the clamping but complain so + * that it gets fixed, hopefully. 
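For illustration, a minimal sketch of how a provider might consume the new clk_hw_get_rate_range() helper introduced above; foo_determine_rate() and its clamping policy are hypothetical, not part of this series:

#include <linux/clk-provider.h>
#include <linux/minmax.h>

/*
 * Hypothetical .determine_rate implementation: fetch the boundaries
 * aggregated from all consumers of this clock and clamp the requested
 * rate into them before accepting it.
 */
static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	unsigned long min_rate, max_rate;

	clk_hw_get_rate_range(hw, &min_rate, &max_rate);

	req->rate = clamp(req->rate, min_rate, max_rate);

	return 0;
}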
+ */ + if (!req->min_rate && !req->max_rate) + pr_warn("%s: %s: clk_rate_request has uninitialized min and max rate.\n", + __func__, core->name); + else + req->rate = clamp(req->rate, req->min_rate, req->max_rate); /* * At this point, core protection will be disabled @@ -1367,13 +1454,19 @@ } static void clk_core_init_rate_req(struct clk_core * const core, - struct clk_rate_request *req) + struct clk_rate_request *req, + unsigned long rate) { struct clk_core *parent; if (WARN_ON(!core || !req)) return; + memset(req, 0, sizeof(*req)); + + req->rate = rate; + clk_core_get_boundaries(core, &req->min_rate, &req->max_rate); + parent = core->parent; if (parent) { req->best_parent_hw = parent->hw; @@ -1384,6 +1477,51 @@ static void clk_core_init_rate_req(struct clk_core * const core, } } +/** + * clk_hw_init_rate_request - Initializes a clk_rate_request + * @hw: the clk for which we want to submit a rate request + * @req: the clk_rate_request structure we want to initialise + * @rate: the rate which is to be requested + * + * Initializes a clk_rate_request structure to submit to + * __clk_determine_rate() or similar functions. + */ +void clk_hw_init_rate_request(const struct clk_hw *hw, + struct clk_rate_request *req, + unsigned long rate) +{ + if (WARN_ON(!hw || !req)) + return; + + clk_core_init_rate_req(hw->core, req, rate); +} +EXPORT_SYMBOL_GPL(clk_hw_init_rate_request); + +/** + * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent + * @hw: the original clock that got the rate request + * @old_req: the original clk_rate_request structure we want to forward + * @parent: the clk we want to forward @old_req to + * @req: the clk_rate_request structure we want to initialise + * @parent_rate: The rate which is to be requested to @parent + * + * Initializes a clk_rate_request structure to submit to a clock parent + * in __clk_determine_rate() or similar functions.
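For illustration, a hypothetical round_rate-style helper mirroring the divider changes earlier in this diff: clk_hw_init_rate_request() fills the rate and the clock's min/max boundaries, and the caller only overrides the parent hints. foo_round_rate_parent() is a made-up name:

#include <linux/clk-provider.h>

static long foo_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
				  unsigned long rate, unsigned long *prate)
{
	struct clk_rate_request req;
	int ret;

	/* Fully initialise the request instead of leaving min/max unset. */
	clk_hw_init_rate_request(hw, &req, rate);
	req.best_parent_rate = *prate;
	req.best_parent_hw = parent;

	ret = __clk_determine_rate(parent, &req);
	if (ret)
		return ret;

	*prate = req.best_parent_rate;

	return req.rate;
}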
+ */ +void clk_hw_forward_rate_request(const struct clk_hw *hw, + const struct clk_rate_request *old_req, + const struct clk_hw *parent, + struct clk_rate_request *req, + unsigned long parent_rate) +{ + if (WARN_ON(!hw || !old_req || !parent || !req)) + return; + + clk_core_forward_rate_req(hw->core, old_req, + parent->core, req, + parent_rate); +} + static bool clk_core_can_round(struct clk_core * const core) { return core->ops->determine_rate || core->ops->round_rate; @@ -1392,6 +1530,8 @@ static bool clk_core_can_round(struct clk_core * const core) static int clk_core_round_rate_nolock(struct clk_core *core, struct clk_rate_request *req) { + int ret; + lockdep_assert_held(&prepare_lock); if (!core) { @@ -1399,12 +1539,22 @@ static int clk_core_round_rate_nolock(struct clk_core *core, return 0; } - clk_core_init_rate_req(core, req); - if (clk_core_can_round(core)) return clk_core_determine_round_nolock(core, req); - else if (core->flags & CLK_SET_RATE_PARENT) - return clk_core_round_rate_nolock(core->parent, req); + + if (core->flags & CLK_SET_RATE_PARENT) { + struct clk_rate_request parent_req; + + clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate); + ret = clk_core_round_rate_nolock(core->parent, &parent_req); + if (ret) + return ret; + + req->best_parent_rate = parent_req.rate; + req->rate = parent_req.rate; + + return 0; + } req->rate = core->rate; return 0; @@ -1448,8 +1598,7 @@ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate) int ret; struct clk_rate_request req; - clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate); - req.rate = rate; + clk_core_init_rate_req(hw->core, &req, rate); ret = clk_core_round_rate_nolock(hw->core, &req); if (ret) @@ -1481,8 +1630,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate) if (clk->exclusive_count) clk_core_rate_unprotect(clk->core); - clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate); - req.rate = rate; + clk_core_init_rate_req(clk->core, &req, rate); ret = clk_core_round_rate_nolock(clk->core, &req); @@ -1611,6 +1759,7 @@ static unsigned long clk_recalc(struct clk_core *core, /** * __clk_recalc_rates * @core: first clk in the subtree + * @update_req: Whether req_rate should be updated with the new rate * @msg: notification type (see include/linux/clk.h) * * Walks the subtree of clks starting with clk and recalculates rates as it @@ -1620,7 +1769,8 @@ static unsigned long clk_recalc(struct clk_core *core, * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, * if necessary. 
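For illustration, a hypothetical mux .determine_rate callback following the forwarding pattern the at91 and composite clocks are converted to above; the selection policy (fastest rate not above the target) is made up, only the helpers are from this series:

#include <linux/clk-provider.h>

static int foo_mux_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_hw *best_parent = NULL;
	unsigned long best = 0;
	unsigned int i;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		struct clk_rate_request parent_req;
		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);

		if (!parent)
			continue;

		/* A fresh, boundary-aware request for each candidate. */
		clk_hw_forward_rate_request(hw, req, parent, &parent_req,
					    req->rate);
		if (__clk_determine_rate(parent, &parent_req))
			continue;

		if (parent_req.rate <= req->rate && parent_req.rate > best) {
			best = parent_req.rate;
			best_parent = parent;
		}
	}

	if (!best_parent)
		return -EINVAL;

	req->best_parent_hw = best_parent;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}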
*/ -static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) +static void __clk_recalc_rates(struct clk_core *core, bool update_req, + unsigned long msg) { unsigned long old_rate; unsigned long parent_rate = 0; @@ -1634,6 +1784,8 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) parent_rate = core->parent->rate; core->rate = clk_recalc(core, parent_rate); + if (update_req) + core->req_rate = core->rate; /* * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE @@ -1643,13 +1795,13 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) __clk_notify(core, msg, old_rate, core->rate); hlist_for_each_entry(child, &core->children, child_node) - __clk_recalc_rates(child, msg); + __clk_recalc_rates(child, update_req, msg); } static unsigned long clk_core_get_rate_recalc(struct clk_core *core) { if (core && (core->flags & CLK_GET_RATE_NOCACHE)) - __clk_recalc_rates(core, 0); + __clk_recalc_rates(core, false, 0); return clk_core_get_rate_nolock(core); } @@ -1659,8 +1811,9 @@ static unsigned long clk_core_get_rate_recalc(struct clk_core *core) * @clk: the clk whose rate is being returned * * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag - * is set, which means a recalc_rate will be issued. - * If clk is NULL then returns 0. + * is set, which means a recalc_rate will be issued. Can be called regardless of + * the clock enabledness. If clk is NULL, or if an error occurred, then returns + * 0. */ unsigned long clk_get_rate(struct clk *clk) { @@ -1864,6 +2017,7 @@ static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, flags = clk_enable_lock(); clk_reparent(core, old_parent); clk_enable_unlock(flags); + __clk_set_parent_after(core, old_parent, parent); return ret; @@ -1969,11 +2123,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core, if (clk_core_can_round(core)) { struct clk_rate_request req; - req.rate = rate; - req.min_rate = min_rate; - req.max_rate = max_rate; - - clk_core_init_rate_req(core, &req); + clk_core_init_rate_req(core, &req, rate); ret = clk_core_determine_round_nolock(core, &req); if (ret < 0) @@ -2172,8 +2322,7 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core, if (cnt < 0) return cnt; - clk_core_get_boundaries(core, &req.min_rate, &req.max_rate); - req.rate = req_rate; + clk_core_init_rate_req(core, &req, req_rate); ret = clk_core_round_rate_nolock(core, &req); @@ -2324,19 +2473,15 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) } EXPORT_SYMBOL_GPL(clk_set_rate_exclusive); -/** - * clk_set_rate_range - set a rate range for a clock source - * @clk: clock source - * @min: desired minimum clock rate in Hz, inclusive - * @max: desired maximum clock rate in Hz, inclusive - * - * Returns success (0) or negative errno. 
- */ -int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) +static int clk_set_rate_range_nolock(struct clk *clk, + unsigned long min, + unsigned long max) { int ret = 0; unsigned long old_min, old_max, rate; + lockdep_assert_held(&prepare_lock); + if (!clk) return 0; @@ -2349,8 +2494,6 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) return -EINVAL; } - clk_prepare_lock(); - if (clk->exclusive_count) clk_core_rate_unprotect(clk->core); @@ -2365,6 +2508,10 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) goto out; } + rate = clk->core->req_rate; + if (clk->core->flags & CLK_GET_RATE_NOCACHE) + rate = clk_core_get_rate_recalc(clk->core); + /* * Since the boundaries have been changed, let's give the * opportunity to the provider to adjust the clock rate based on @@ -2382,7 +2529,7 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) * - the determine_rate() callback does not really check for * this corner case when determining the rate */ - rate = clamp(clk->core->req_rate, min, max); + rate = clamp(rate, min, max); ret = clk_core_set_rate_nolock(clk->core, rate); if (ret) { /* rollback the changes */ @@ -2394,6 +2541,28 @@ out: if (clk->exclusive_count) clk_core_rate_protect(clk->core); + return ret; +} + +/** + * clk_set_rate_range - set a rate range for a clock source + * @clk: clock source + * @min: desired minimum clock rate in Hz, inclusive + * @max: desired maximum clock rate in Hz, inclusive + * + * Return: 0 for success or negative errno on failure. + */ +int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) +{ + int ret; + + if (!clk) + return 0; + + clk_prepare_lock(); + + ret = clk_set_rate_range_nolock(clk, min, max); + clk_prepare_unlock(); return ret; @@ -2473,7 +2642,7 @@ static void clk_core_reparent(struct clk_core *core, { clk_reparent(core, new_parent); __clk_recalc_accuracies(core); - __clk_recalc_rates(core, POST_RATE_CHANGE); + __clk_recalc_rates(core, true, POST_RATE_CHANGE); } void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) @@ -2494,27 +2663,13 @@ void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) * * Returns true if @parent is a possible parent for @clk, false otherwise. */ -bool clk_has_parent(struct clk *clk, struct clk *parent) +bool clk_has_parent(const struct clk *clk, const struct clk *parent) { - struct clk_core *core, *parent_core; - int i; - /* NULL clocks should be nops, so return success if either is NULL. */ if (!clk || !parent) return true; - core = clk->core; - parent_core = parent->core; - - /* Optimize for the case where the parent is already the parent. 
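For illustration, a hypothetical consumer sequence matching the reworked clk_set_rate_range() semantics: the core re-applies the last requested rate (or, for CLK_GET_RATE_NOCACHE clocks, the freshly re-read hardware rate) clamped into the new boundaries:

#include <linux/clk.h>

static int foo_constrain(struct clk *clk)
{
	int ret;

	ret = clk_set_rate(clk, 144000000);
	if (ret)
		return ret;

	/*
	 * The previously requested 144 MHz now falls outside the range,
	 * so the core is expected to re-adjust the clock towards the
	 * 100 MHz bound.
	 */
	return clk_set_rate_range(clk, 50000000, 100000000);
}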
*/ - if (core->parent == parent_core) - return true; - - for (i = 0; i < core->num_parents; i++) - if (!strcmp(core->parents[i].name, parent_core->name)) - return true; - - return false; + return clk_core_has_parent(clk->core, parent->core); } EXPORT_SYMBOL_GPL(clk_has_parent); @@ -2571,9 +2726,9 @@ static int clk_core_set_parent_nolock(struct clk_core *core, /* propagate rate an accuracy recalculation accordingly */ if (ret) { - __clk_recalc_rates(core, ABORT_RATE_CHANGE); + __clk_recalc_rates(core, true, ABORT_RATE_CHANGE); } else { - __clk_recalc_rates(core, POST_RATE_CHANGE); + __clk_recalc_rates(core, true, POST_RATE_CHANGE); __clk_recalc_accuracies(core); } @@ -3470,7 +3625,7 @@ static void clk_core_reparent_orphans_nolock(void) __clk_set_parent_before(orphan, parent); __clk_set_parent_after(orphan, parent, NULL); __clk_recalc_accuracies(orphan); - __clk_recalc_rates(orphan, 0); + __clk_recalc_rates(orphan, true, 0); /* * __clk_init_parent() will set the initial req_rate to @@ -4346,9 +4501,10 @@ void __clk_put(struct clk *clk) } hlist_del(&clk->clks_node); - if (clk->min_rate > clk->core->req_rate || - clk->max_rate < clk->core->req_rate) - clk_core_set_rate_nolock(clk->core, clk->core->req_rate); + + /* If we had any boundaries on that clock, let's drop them. */ + if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX) + clk_set_rate_range_nolock(clk, 0, ULONG_MAX); owner = clk->core->owner; kref_put(&clk->core->ref, __clk_release); diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c index 6731a822f4e3..f9a5c2964c65 100644 --- a/drivers/clk/clk_test.c +++ b/drivers/clk/clk_test.c @@ -108,6 +108,39 @@ static const struct clk_ops clk_dummy_single_parent_ops = { .get_parent = clk_dummy_single_get_parent, }; +struct clk_multiple_parent_ctx { + struct clk_dummy_context parents_ctx[2]; + struct clk_hw hw; + u8 current_parent; +}; + +static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_multiple_parent_ctx *ctx = + container_of(hw, struct clk_multiple_parent_ctx, hw); + + if (index >= clk_hw_get_num_parents(hw)) + return -EINVAL; + + ctx->current_parent = index; + + return 0; +} + +static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw) +{ + struct clk_multiple_parent_ctx *ctx = + container_of(hw, struct clk_multiple_parent_ctx, hw); + + return ctx->current_parent; +} + +static const struct clk_ops clk_multiple_parents_mux_ops = { + .get_parent = clk_multiple_parents_mux_get_parent, + .set_parent = clk_multiple_parents_mux_set_parent, + .determine_rate = __clk_mux_determine_rate_closest, +}; + static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops) { struct clk_dummy_context *ctx; @@ -160,12 +193,14 @@ static void clk_test_get_rate(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); unsigned long rate; rate = clk_get_rate(clk); KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_EQ(test, rate, ctx->rate); + + clk_put(clk); } /* @@ -179,7 +214,7 @@ static void clk_test_set_get_rate(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); unsigned long rate; KUNIT_ASSERT_EQ(test, @@ -189,6 +224,8 @@ static void clk_test_set_get_rate(struct kunit *test) rate = clk_get_rate(clk); KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1); + + clk_put(clk); } /* @@ -202,7 
+239,7 @@ static void clk_test_set_set_get_rate(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); unsigned long rate; KUNIT_ASSERT_EQ(test, @@ -216,6 +253,8 @@ static void clk_test_set_set_get_rate(struct kunit *test) rate = clk_get_rate(clk); KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(clk); } /* @@ -226,7 +265,7 @@ static void clk_test_round_set_get_rate(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); unsigned long rounded_rate, set_rate; rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1); @@ -240,6 +279,8 @@ static void clk_test_round_set_get_rate(struct kunit *test) set_rate = clk_get_rate(clk); KUNIT_ASSERT_GT(test, set_rate, 0); KUNIT_EXPECT_EQ(test, rounded_rate, set_rate); + + clk_put(clk); } static struct kunit_case clk_test_cases[] = { @@ -250,6 +291,11 @@ static struct kunit_case clk_test_cases[] = { {} }; +/* + * Test suite for a basic rate clock, without any parent. + * + * These tests exercise the rate API with simple scenarios + */ static struct kunit_suite clk_test_suite = { .name = "clk-test", .init = clk_test_init, @@ -257,11 +303,912 @@ static struct kunit_suite clk_test_suite = { .test_cases = clk_test_cases, }; +static int clk_uncached_test_init(struct kunit *test) +{ + struct clk_dummy_context *ctx; + int ret; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + test->priv = ctx; + + ctx->rate = DUMMY_CLOCK_INIT_RATE; + ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk", + &clk_dummy_rate_ops, + CLK_GET_RATE_NOCACHE); + + ret = clk_hw_register(NULL, &ctx->hw); + if (ret) + return ret; + + return 0; +} + +/* + * Test that for an uncached clock, the clock framework doesn't cache + * the rate and clk_get_rate() will return the underlying clock rate + * even if it changed. + */ +static void clk_test_uncached_get_rate(struct kunit *test) +{ + struct clk_dummy_context *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + unsigned long rate; + + rate = clk_get_rate(clk); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE); + + /* We change the rate behind the clock framework's back */ + ctx->rate = DUMMY_CLOCK_RATE_1; + rate = clk_get_rate(clk); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1); + + clk_put(clk); +} + +/* + * Test that for an uncached clock, clk_set_rate_range() will work + * properly if the rate hasn't changed. + */ +static void clk_test_uncached_set_range(struct kunit *test) +{ + struct clk_dummy_context *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + unsigned long rate; + + KUNIT_ASSERT_EQ(test, + clk_set_rate_range(clk, + DUMMY_CLOCK_RATE_1, + DUMMY_CLOCK_RATE_2), + 0); + + rate = clk_get_rate(clk); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1); + KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(clk); +} + +/* + * Test that for an uncached clock, clk_set_rate_range() will work + * properly if the rate has changed in hardware. 
+ * + * In this case, it means that if the rate wasn't initially in the range + * we're trying to set, but got changed at some point into the range + * without the kernel knowing about it, its rate shouldn't be affected. + */ +static void clk_test_uncached_updated_rate_set_range(struct kunit *test) +{ + struct clk_dummy_context *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + unsigned long rate; + + /* We change the rate behind the clock framework's back */ + ctx->rate = DUMMY_CLOCK_RATE_1 + 1000; + KUNIT_ASSERT_EQ(test, + clk_set_rate_range(clk, + DUMMY_CLOCK_RATE_1, + DUMMY_CLOCK_RATE_2), + 0); + + rate = clk_get_rate(clk); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000); + + clk_put(clk); +} + +static struct kunit_case clk_uncached_test_cases[] = { + KUNIT_CASE(clk_test_uncached_get_rate), + KUNIT_CASE(clk_test_uncached_set_range), + KUNIT_CASE(clk_test_uncached_updated_rate_set_range), + {} +}; + +/* + * Test suite for a basic, uncached, rate clock, without any parent. + * + * These tests exercise the rate API with simple scenarios + */ +static struct kunit_suite clk_uncached_test_suite = { + .name = "clk-uncached-test", + .init = clk_uncached_test_init, + .exit = clk_test_exit, + .test_cases = clk_uncached_test_cases, +}; + +static int +clk_multiple_parents_mux_test_init(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx; + const char *parents[2] = { "parent-0", "parent-1"}; + int ret; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + test->priv = ctx; + + ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0", + &clk_dummy_rate_ops, + 0); + ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1; + ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw); + if (ret) + return ret; + + ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1", + &clk_dummy_rate_ops, + 0); + ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2; + ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw); + if (ret) + return ret; + + ctx->current_parent = 0; + ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents, + &clk_multiple_parents_mux_ops, + CLK_SET_RATE_PARENT); + ret = clk_hw_register(NULL, &ctx->hw); + if (ret) + return ret; + + return 0; +} + +static void +clk_multiple_parents_mux_test_exit(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + + clk_hw_unregister(&ctx->hw); + clk_hw_unregister(&ctx->parents_ctx[0].hw); + clk_hw_unregister(&ctx->parents_ctx[1].hw); +} + +/* + * Test that for a clock with multiple parents, clk_get_parent() + * actually returns the current one. + */ +static void +clk_test_multiple_parents_mux_get_parent(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL); + + KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent)); + + clk_put(parent); + clk_put(clk); +} + +/* + * Test that for a clock with multiple parents, clk_has_parent() + * actually reports all of them as parents. + */
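For illustration, the reference-counting pattern the tests in this file are converted to, as a stand-alone hypothetical case: clk_hw_get_clk() takes a proper reference on the struct clk instead of peeking at hw->clk, and clk_put() drops it before the test returns:

#include <kunit/test.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>

/* Assumes a fixture whose priv holds a clk_dummy_context, as here. */
static void foo_test_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk *clk = clk_hw_get_clk(&ctx->hw, NULL);

	KUNIT_EXPECT_GT(test, clk_get_rate(clk), 0);

	clk_put(clk);
}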
+static void +clk_test_multiple_parents_mux_has_parent(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent; + + parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL); + KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent)); + clk_put(parent); + + parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL); + KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent)); + clk_put(parent); + + clk_put(clk); +} + +/* + * Test that for a clock with multiple parents, if we set a range on + * that clock and the parent is changed, its rate after the reparenting + * is still within the range we asked for. + * + * FIXME: clk_set_parent() only does the reparenting but doesn't + * reevaluate whether the new clock rate is within its boundaries or + * not. + */ +static void +clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent1, *parent2; + unsigned long rate; + int ret; + + kunit_skip(test, "This needs to be fixed in the core."); + + parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1); + KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1)); + + parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2); + + ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = clk_set_rate_range(clk, + DUMMY_CLOCK_RATE_1 - 1000, + DUMMY_CLOCK_RATE_1 + 1000); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = clk_set_parent(clk, parent2); + KUNIT_ASSERT_EQ(test, ret, 0); + + rate = clk_get_rate(clk); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000); + KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000); + + clk_put(parent2); + clk_put(parent1); + clk_put(clk); +} + +static struct kunit_case clk_multiple_parents_mux_test_cases[] = { + KUNIT_CASE(clk_test_multiple_parents_mux_get_parent), + KUNIT_CASE(clk_test_multiple_parents_mux_has_parent), + KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate), + {} +}; + +/* + * Test suite for a basic mux clock with two parents, with + * CLK_SET_RATE_PARENT on the child. + * + * These tests exercise the consumer API and check that the state of the + * child and parents are sane and consistent.
+ */ +static struct kunit_suite +clk_multiple_parents_mux_test_suite = { + .name = "clk-multiple-parents-mux-test", + .init = clk_multiple_parents_mux_test_init, + .exit = clk_multiple_parents_mux_test_exit, + .test_cases = clk_multiple_parents_mux_test_cases, +}; + +static int +clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx; + const char *parents[2] = { "missing-parent", "proper-parent"}; + int ret; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + test->priv = ctx; + + ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent", + &clk_dummy_rate_ops, + 0); + ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE; + ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw); + if (ret) + return ret; + + ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents, + &clk_multiple_parents_mux_ops, + CLK_SET_RATE_PARENT); + ret = clk_hw_register(NULL, &ctx->hw); + if (ret) + return ret; + + return 0; +} + +static void +clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + + clk_hw_unregister(&ctx->hw); + clk_hw_unregister(&ctx->parents_ctx[1].hw); +} + +/* + * Test that, for a mux whose current parent hasn't been registered yet and is + * thus orphan, clk_get_parent() will return NULL. + */ +static void +clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + + KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL); + + clk_put(clk); +} + +/* + * Test that, for a mux whose current parent hasn't been registered yet, + * calling clk_set_parent() to a valid parent will properly update the + * mux parent and its orphan status. + */ +static void +clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent, *new_parent; + int ret; + + parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); + + ret = clk_set_parent(clk, parent); + KUNIT_ASSERT_EQ(test, ret, 0); + + new_parent = clk_get_parent(clk); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); + KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent)); + + clk_put(parent); + clk_put(clk); +} + +/* + * Test that, for a mux that started orphan but got switched to a valid + * parent, calling clk_drop_range() on the mux won't affect the parent + * rate. 
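For context, the clk_drop_range() call used by the test above is, at the time of this series, a thin inline wrapper in include/linux/clk.h, so dropping a range is just widening it back to the defaults:

#include <linux/clk.h>

static inline int clk_drop_range(struct clk *clk)
{
	return clk_set_rate_range(clk, 0, ULONG_MAX);
}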
+ */ +static void +clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent; + unsigned long parent_rate, new_parent_rate; + int ret; + + parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); + + parent_rate = clk_get_rate(parent); + KUNIT_ASSERT_GT(test, parent_rate, 0); + + ret = clk_set_parent(clk, parent); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = clk_drop_range(clk); + KUNIT_ASSERT_EQ(test, ret, 0); + + new_parent_rate = clk_get_rate(clk); + KUNIT_ASSERT_GT(test, new_parent_rate, 0); + KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate); + + clk_put(parent); + clk_put(clk); +} + +/* + * Test that, for a mux that started orphan but got switched to a valid + * parent, the rate of the mux and its new parent are consistent. + */ +static void +clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent; + unsigned long parent_rate, rate; + int ret; + + parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); + + parent_rate = clk_get_rate(parent); + KUNIT_ASSERT_GT(test, parent_rate, 0); + + ret = clk_set_parent(clk, parent); + KUNIT_ASSERT_EQ(test, ret, 0); + + rate = clk_get_rate(clk); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_EQ(test, parent_rate, rate); + + clk_put(parent); + clk_put(clk); +} + +/* + * Test that, for a mux that started orphan but got switched to a valid + * parent, calling clk_put() on the mux won't affect the parent rate. + */ +static void +clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk *clk, *parent; + unsigned long parent_rate, new_parent_rate; + int ret; + + parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); + + clk = clk_hw_get_clk(&ctx->hw, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk); + + parent_rate = clk_get_rate(parent); + KUNIT_ASSERT_GT(test, parent_rate, 0); + + ret = clk_set_parent(clk, parent); + KUNIT_ASSERT_EQ(test, ret, 0); + + clk_put(clk); + + new_parent_rate = clk_get_rate(parent); + KUNIT_ASSERT_GT(test, new_parent_rate, 0); + KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate); + + clk_put(parent); +} + +/* + * Test that, for a mux that started orphan but got switched to a valid + * parent, calling clk_set_rate_range() will affect the parent state if + * its rate is out of range. 
+ */ +static void +clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent; + unsigned long rate; + int ret; + + parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); + + ret = clk_set_parent(clk, parent); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2); + KUNIT_ASSERT_EQ(test, ret, 0); + + rate = clk_get_rate(clk); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1); + KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(parent); + clk_put(clk); +} + +/* + * Test that, for a mux that started orphan but got switched to a valid + * parent, calling clk_set_rate_range() won't affect the parent state if + * its rate is within range. + */ +static void +clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent; + unsigned long parent_rate, new_parent_rate; + int ret; + + parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); + + parent_rate = clk_get_rate(parent); + KUNIT_ASSERT_GT(test, parent_rate, 0); + + ret = clk_set_parent(clk, parent); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = clk_set_rate_range(clk, + DUMMY_CLOCK_INIT_RATE - 1000, + DUMMY_CLOCK_INIT_RATE + 1000); + KUNIT_ASSERT_EQ(test, ret, 0); + + new_parent_rate = clk_get_rate(parent); + KUNIT_ASSERT_GT(test, new_parent_rate, 0); + KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate); + + clk_put(parent); + clk_put(clk); +} + +/* + * Test that, for a mux whose current parent hasn't been registered yet, + * calling clk_set_rate_range() will succeed, and will be taken into + * account when rounding a rate. + */ +static void +clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + unsigned long rate; + int ret; + + ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2); + KUNIT_ASSERT_EQ(test, ret, 0); + + rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1); + KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(clk); +} + +/* + * Test that, for a mux that started orphan, was assigned a rate and + * then got switched to a valid parent, its rate is eventually within + * range. + * + * FIXME: Even though we update the rate as part of clk_set_parent(), we + * don't evaluate whether that new rate is within range and needs to be + * adjusted.
+ */ +static void +clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test) +{ + struct clk_multiple_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent; + unsigned long rate; + int ret; + + kunit_skip(test, "This needs to be fixed in the core."); + + clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2); + + parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent); + + ret = clk_set_parent(clk, parent); + KUNIT_ASSERT_EQ(test, ret, 0); + + rate = clk_get_rate(clk); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1); + KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(parent); + clk_put(clk); +} + +static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = { + KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent), + KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent), + KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range), + KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate), + KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put), + KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified), + KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched), + KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate), + KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate), + {} +}; + +/* + * Test suite for a basic mux clock with two parents. The default parent + * isn't registered, only the second parent is. By default, the clock + * will thus be orphan. + * + * These tests exercise the behaviour of the consumer API when dealing + * with an orphan clock, and how we deal with the transition to a valid + * parent. + */ +static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = { + .name = "clk-orphan-transparent-multiple-parent-mux-test", + .init = clk_orphan_transparent_multiple_parent_mux_test_init, + .exit = clk_orphan_transparent_multiple_parent_mux_test_exit, + .test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases, +}; + struct clk_single_parent_ctx { struct clk_dummy_context parent_ctx; struct clk_hw hw; }; +static int clk_single_parent_mux_test_init(struct kunit *test) +{ + struct clk_single_parent_ctx *ctx; + int ret; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + test->priv = ctx; + + ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE; + ctx->parent_ctx.hw.init = + CLK_HW_INIT_NO_PARENT("parent-clk", + &clk_dummy_rate_ops, + 0); + + ret = clk_hw_register(NULL, &ctx->parent_ctx.hw); + if (ret) + return ret; + + ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk", + &clk_dummy_single_parent_ops, + CLK_SET_RATE_PARENT); + + ret = clk_hw_register(NULL, &ctx->hw); + if (ret) + return ret; + + return 0; +} + +static void +clk_single_parent_mux_test_exit(struct kunit *test) +{ + struct clk_single_parent_ctx *ctx = test->priv; + + clk_hw_unregister(&ctx->hw); + clk_hw_unregister(&ctx->parent_ctx.hw); +} + +/* + * Test that for a clock with a single parent, clk_get_parent() actually + * returns the parent. 
+ */ +static void +clk_test_single_parent_mux_get_parent(struct kunit *test) +{ + struct clk_single_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL); + + KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent)); + + clk_put(parent); + clk_put(clk); +} + +/* + * Test that for a clock with a single parent, clk_has_parent() actually + * reports it as a parent. + */ +static void +clk_test_single_parent_mux_has_parent(struct kunit *test) +{ + struct clk_single_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL); + + KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent)); + + clk_put(parent); + clk_put(clk); +} + +/* + * Test that for a clock that can't modify its rate and with a single + * parent, if we set disjoint ranges on the parent and then the child, + * the second will return an error. + * + * FIXME: clk_set_rate_range() only considers the current clock when + * evaluating whether ranges are disjoint and not the upstream clocks' + * ranges. + */ +static void +clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test) +{ + struct clk_single_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent; + int ret; + + kunit_skip(test, "This needs to be fixed in the core."); + + parent = clk_get_parent(clk); + KUNIT_ASSERT_PTR_NE(test, parent, NULL); + + ret = clk_set_rate_range(parent, 1000, 2000); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = clk_set_rate_range(clk, 3000, 4000); + KUNIT_EXPECT_LT(test, ret, 0); + + clk_put(clk); +} + +/* + * Test that for a clock that can't modify its rate and with a single + * parent, if we set disjoint ranges on the child and then the parent, + * the second will return an error. + * + * FIXME: clk_set_rate_range() only considers the current clock when + * evaluating whether ranges are disjoint and not the downstream clocks' + * ranges. + */ +static void +clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test) +{ + struct clk_single_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent; + int ret; + + kunit_skip(test, "This needs to be fixed in the core."); + + parent = clk_get_parent(clk); + KUNIT_ASSERT_PTR_NE(test, parent, NULL); + + ret = clk_set_rate_range(clk, 1000, 2000); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = clk_set_rate_range(parent, 3000, 4000); + KUNIT_EXPECT_LT(test, ret, 0); + + clk_put(clk); +} + +/* + * Test that for a clock that can't modify its rate and with a single + * parent, if we set a range on the parent and then call + * clk_round_rate(), the boundaries of the parent are taken into + * account.
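For illustration, a hypothetical consumer sequence equivalent to what the test above asserts: since rate requests now carry boundaries across CLK_SET_RATE_PARENT, rounding on the child respects a range set on its parent:

#include <linux/clk.h>

/* Assumes clk is a transparent (CLK_SET_RATE_PARENT) child of parent. */
static long foo_round_with_parent_range(struct clk *clk, struct clk *parent)
{
	int ret;

	ret = clk_set_rate_range(parent, 1000000, 2000000);
	if (ret)
		return ret;

	/*
	 * Expected to come back within [1 MHz, 2 MHz] even though the
	 * target sits below the parent's minimum.
	 */
	return clk_round_rate(clk, 500000);
}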
+ */ +static void +clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test) +{ + struct clk_single_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent; + unsigned long rate; + int ret; + + parent = clk_get_parent(clk); + KUNIT_ASSERT_PTR_NE(test, parent, NULL); + + ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2); + KUNIT_ASSERT_EQ(test, ret, 0); + + rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1); + KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(clk); +} + +/* + * Test that for a clock that can't modify its rate and with a single + * parent, if we set a range on the parent and a more restrictive one on + * the child, and then call clk_round_rate(), the boundaries of the + * two clocks are taken into account. + */ +static void +clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test) +{ + struct clk_single_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent; + unsigned long rate; + int ret; + + parent = clk_get_parent(clk); + KUNIT_ASSERT_PTR_NE(test, parent, NULL); + + ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000); + KUNIT_ASSERT_EQ(test, ret, 0); + + rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000); + KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000); + + rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000); + KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000); + + clk_put(clk); +} + +/* + * Test that for a clock that can't modify its rate and with a single + * parent, if we set a range on the child and a more restrictive one on + * the parent, and then call clk_round_rate(), the boundaries of the + * two clocks are taken into account. 
+ */ +static void +clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test) +{ + struct clk_single_parent_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *parent; + unsigned long rate; + int ret; + + parent = clk_get_parent(clk); + KUNIT_ASSERT_PTR_NE(test, parent, NULL); + + ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2); + KUNIT_ASSERT_EQ(test, ret, 0); + + rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000); + KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000); + + rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000); + KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000); + + clk_put(clk); +} + +static struct kunit_case clk_single_parent_mux_test_cases[] = { + KUNIT_CASE(clk_test_single_parent_mux_get_parent), + KUNIT_CASE(clk_test_single_parent_mux_has_parent), + KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last), + KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last), + KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller), + KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only), + KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller), + {} +}; + +/* + * Test suite for a basic mux clock with one parent, with + * CLK_SET_RATE_PARENT on the child. + * + * These tests exercise the consumer API and check that the state of the + * child and parent are sane and consistent. + */ +static struct kunit_suite +clk_single_parent_mux_test_suite = { + .name = "clk-single-parent-mux-test", + .init = clk_single_parent_mux_test_init, + .exit = clk_single_parent_mux_test_exit, + .test_cases = clk_single_parent_mux_test_cases, +}; + static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test) { struct clk_single_parent_ctx *ctx; @@ -298,23 +1245,18 @@ static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test return 0; } -static void clk_orphan_transparent_single_parent_mux_test_exit(struct kunit *test) -{ - struct clk_single_parent_ctx *ctx = test->priv; - - clk_hw_unregister(&ctx->hw); - clk_hw_unregister(&ctx->parent_ctx.hw); -} - /* * Test that a mux-only clock, with an initial rate within a range, * will still have the same rate after the range has been enforced. + * + * See: + * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/ */ static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test) { struct clk_single_parent_ctx *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); unsigned long rate, new_rate; rate = clk_get_rate(clk); @@ -329,6 +1271,8 @@ static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test) new_rate = clk_get_rate(clk); KUNIT_ASSERT_GT(test, new_rate, 0); KUNIT_EXPECT_EQ(test, rate, new_rate); + + clk_put(clk); } static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = { @@ -336,13 +1280,151 @@ static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {} }; +/* + * Test suite for a basic mux clock with one parent. 
The parent is + * registered after its child. The clock will thus be an orphan when + * registered, but will no longer be when the tests run. + * + * These tests make sure a clock that used to be orphan has a sane, + * consistent, behaviour. + */ static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = { .name = "clk-orphan-transparent-single-parent-test", .init = clk_orphan_transparent_single_parent_mux_test_init, - .exit = clk_orphan_transparent_single_parent_mux_test_exit, + .exit = clk_single_parent_mux_test_exit, .test_cases = clk_orphan_transparent_single_parent_mux_test_cases, }; +struct clk_single_parent_two_lvl_ctx { + struct clk_dummy_context parent_parent_ctx; + struct clk_dummy_context parent_ctx; + struct clk_hw hw; +}; + +static int +clk_orphan_two_level_root_last_test_init(struct kunit *test) +{ + struct clk_single_parent_two_lvl_ctx *ctx; + int ret; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + test->priv = ctx; + + ctx->parent_ctx.hw.init = + CLK_HW_INIT("intermediate-parent", + "root-parent", + &clk_dummy_single_parent_ops, + CLK_SET_RATE_PARENT); + ret = clk_hw_register(NULL, &ctx->parent_ctx.hw); + if (ret) + return ret; + + ctx->hw.init = + CLK_HW_INIT("test-clk", "intermediate-parent", + &clk_dummy_single_parent_ops, + CLK_SET_RATE_PARENT); + ret = clk_hw_register(NULL, &ctx->hw); + if (ret) + return ret; + + ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE; + ctx->parent_parent_ctx.hw.init = + CLK_HW_INIT_NO_PARENT("root-parent", + &clk_dummy_rate_ops, + 0); + ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw); + if (ret) + return ret; + + return 0; +} + +static void +clk_orphan_two_level_root_last_test_exit(struct kunit *test) +{ + struct clk_single_parent_two_lvl_ctx *ctx = test->priv; + + clk_hw_unregister(&ctx->hw); + clk_hw_unregister(&ctx->parent_ctx.hw); + clk_hw_unregister(&ctx->parent_parent_ctx.hw); +} + +/* + * Test that, for a clock whose parent used to be orphan, clk_get_rate() + * will return the proper rate. + */ +static void +clk_orphan_two_level_root_last_test_get_rate(struct kunit *test) +{ + struct clk_single_parent_two_lvl_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + unsigned long rate; + + rate = clk_get_rate(clk); + KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE); + + clk_put(clk); +} + +/* + * Test that, for a clock whose parent used to be orphan, + * clk_set_rate_range() won't affect its rate if it is already within + * range. + * + * See (for Exynos 4210): + * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/ + */ +static void +clk_orphan_two_level_root_last_test_set_range(struct kunit *test) +{ + struct clk_single_parent_two_lvl_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + unsigned long rate; + int ret; + + ret = clk_set_rate_range(clk, + DUMMY_CLOCK_INIT_RATE - 1000, + DUMMY_CLOCK_INIT_RATE + 1000); + KUNIT_ASSERT_EQ(test, ret, 0); + + rate = clk_get_rate(clk); + KUNIT_ASSERT_GT(test, rate, 0); + KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE); + + clk_put(clk); +} + +static struct kunit_case +clk_orphan_two_level_root_last_test_cases[] = { + KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate), + KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range), + {} +}; + +/* + * Test suite for a basic, transparent, clock with a parent that is also + * such a clock. 
The parent's parent is registered last, while the + * parent and its child are registered in that order. The intermediate + * and leaf clocks will thus be orphan when registered, but the leaf + * clock itself will always have its parent and will never be + * reparented. Indeed, it's only orphan because its parent is. + * + * These tests exercise the behaviour of the consumer API when dealing + * with an orphan clock, and how we deal with the transition to a valid + * parent. + */ +static struct kunit_suite +clk_orphan_two_level_root_last_test_suite = { + .name = "clk-orphan-two-level-root-last-test", + .init = clk_orphan_two_level_root_last_test_init, + .exit = clk_orphan_two_level_root_last_test_exit, + .test_cases = clk_orphan_two_level_root_last_test_cases, +}; + /* * Test that clk_set_rate_range won't return an error for a valid range * and that it will make sure the rate of the clock is within the @@ -352,7 +1434,7 @@ static void clk_range_test_set_range(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); unsigned long rate; KUNIT_ASSERT_EQ(test, @@ -365,6 +1447,8 @@ static void clk_range_test_set_range(struct kunit *test) KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1); KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(clk); } /* @@ -375,13 +1459,15 @@ static void clk_range_test_set_range_invalid(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); KUNIT_EXPECT_LT(test, clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_1), 0); + + clk_put(clk); } /* @@ -420,7 +1506,7 @@ static void clk_range_test_set_range_round_rate_lower(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); long rate; KUNIT_ASSERT_EQ(test, @@ -433,6 +1519,8 @@ static void clk_range_test_set_range_round_rate_lower(struct kunit *test) KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1); KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(clk); } /* @@ -443,7 +1531,7 @@ static void clk_range_test_set_range_set_rate_lower(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); unsigned long rate; KUNIT_ASSERT_EQ(test, @@ -460,6 +1548,8 @@ static void clk_range_test_set_range_set_rate_lower(struct kunit *test) KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1); KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(clk); } /* @@ -472,7 +1562,7 @@ static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kuni { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); long rounded; KUNIT_ASSERT_EQ(test, @@ -489,6 +1579,8 @@ static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kuni 0); KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk)); + + clk_put(clk); } /* @@ -499,7 +1591,7 @@ static void clk_range_test_set_range_round_rate_higher(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); 
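+	/*
+	 * Unlike the old hw->clk shortcut, clk_hw_get_clk() returns a
+	 * refcounted consumer handle, which is why every converted test
+	 * in this file now ends with clk_put().
+	 */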
long rate; KUNIT_ASSERT_EQ(test, @@ -512,6 +1604,8 @@ static void clk_range_test_set_range_round_rate_higher(struct kunit *test) KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1); KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(clk); } /* @@ -522,7 +1616,7 @@ static void clk_range_test_set_range_set_rate_higher(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); unsigned long rate; KUNIT_ASSERT_EQ(test, @@ -539,6 +1633,8 @@ static void clk_range_test_set_range_set_rate_higher(struct kunit *test) KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1); KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(clk); } /* @@ -551,7 +1647,7 @@ static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kun { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); long rounded; KUNIT_ASSERT_EQ(test, @@ -568,6 +1664,8 @@ static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kun 0); KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk)); + + clk_put(clk); } /* @@ -582,7 +1680,7 @@ static void clk_range_test_set_range_get_rate_raised(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); unsigned long rate; KUNIT_ASSERT_EQ(test, @@ -598,6 +1696,8 @@ static void clk_range_test_set_range_get_rate_raised(struct kunit *test) rate = clk_get_rate(clk); KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1); + + clk_put(clk); } /* @@ -612,7 +1712,7 @@ static void clk_range_test_set_range_get_rate_lowered(struct kunit *test) { struct clk_dummy_context *ctx = test->priv; struct clk_hw *hw = &ctx->hw; - struct clk *clk = hw->clk; + struct clk *clk = clk_hw_get_clk(hw, NULL); unsigned long rate; KUNIT_ASSERT_EQ(test, @@ -628,6 +1728,8 @@ static void clk_range_test_set_range_get_rate_lowered(struct kunit *test) rate = clk_get_rate(clk); KUNIT_ASSERT_GT(test, rate, 0); KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2); + + clk_put(clk); } static struct kunit_case clk_range_test_cases[] = { @@ -645,6 +1747,12 @@ static struct kunit_case clk_range_test_cases[] = { {} }; +/* + * Test suite for a basic rate clock, without any parent. + * + * These tests exercise the rate range API: clk_set_rate_range(), + * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(). 
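+ *
+ * As a reading aid, and assuming the mainline definitions rather than
+ * anything in this patch, the single-ended helpers are thin wrappers
+ * around the same call, roughly:
+ *
+ *	clk_set_min_rate(clk, min)  ==  clk_set_rate_range(clk, min, clk->max_rate)
+ *	clk_set_max_rate(clk, max)  ==  clk_set_rate_range(clk, clk->min_rate, max)
+ *	clk_drop_range(clk)         ==  clk_set_rate_range(clk, 0, ULONG_MAX)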
+ */
 static struct kunit_suite clk_range_test_suite = {
 	.name = "clk-range-test",
 	.init = clk_test_init,
@@ -664,7 +1772,7 @@ static void clk_range_test_set_range_rate_maximized(struct kunit *test)
 {
 	struct clk_dummy_context *ctx = test->priv;
 	struct clk_hw *hw = &ctx->hw;
-	struct clk *clk = hw->clk;
+	struct clk *clk = clk_hw_get_clk(hw, NULL);
 	unsigned long rate;
 
 	KUNIT_ASSERT_EQ(test,
@@ -700,6 +1808,8 @@ static void clk_range_test_set_range_rate_maximized(struct kunit *test)
 	rate = clk_get_rate(clk);
 	KUNIT_ASSERT_GT(test, rate, 0);
 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+	clk_put(clk);
 }
 
 /*
@@ -714,7 +1824,7 @@ static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
 {
 	struct clk_dummy_context *ctx = test->priv;
 	struct clk_hw *hw = &ctx->hw;
-	struct clk *clk = hw->clk;
+	struct clk *clk = clk_hw_get_clk(hw, NULL);
 	struct clk *user1, *user2;
 	unsigned long rate;
 
@@ -758,14 +1868,79 @@ static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
 
 	clk_put(user2);
 	clk_put(user1);
+	clk_put(clk);
+}
+
+/*
+ * Test that if we have several subsequent calls to
+ * clk_set_rate_range(), across multiple users, the core will reevaluate
+ * whether a new rate is needed, including when a user drops its clock.
+ *
+ * With clk_dummy_maximize_rate_ops, this means that the rate will
+ * trail along the maximum as it evolves.
+ */
+static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
+{
+	struct clk_dummy_context *ctx = test->priv;
+	struct clk_hw *hw = &ctx->hw;
+	struct clk *clk = clk_hw_get_clk(hw, NULL);
+	struct clk *user1, *user2;
+	unsigned long rate;
+
+	user1 = clk_hw_get_clk(hw, NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
+
+	user2 = clk_hw_get_clk(hw, NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
+
+	KUNIT_ASSERT_EQ(test,
+			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
+			0);
+
+	KUNIT_ASSERT_EQ(test,
+			clk_set_rate_range(user1,
+					   0,
+					   DUMMY_CLOCK_RATE_2),
+			0);
+
+	rate = clk_get_rate(clk);
+	KUNIT_ASSERT_GT(test, rate, 0);
+	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+	KUNIT_ASSERT_EQ(test,
+			clk_set_rate_range(user2,
+					   0,
+					   DUMMY_CLOCK_RATE_1),
+			0);
+
+	rate = clk_get_rate(clk);
+	KUNIT_ASSERT_GT(test, rate, 0);
+	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+	clk_put(user2);
+
+	rate = clk_get_rate(clk);
+	KUNIT_ASSERT_GT(test, rate, 0);
+	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+	clk_put(user1);
+	clk_put(clk);
 }
 
 static struct kunit_case clk_range_maximize_test_cases[] = {
 	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
+	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
 	{}
 };
 
+/*
+ * Test suite for a basic rate clock, without any parent.
+ *
+ * These tests exercise the rate range API: clk_set_rate_range(),
+ * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
+ * driver that will always try to run at the highest possible rate.
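+ *
+ * For context, the "put" test above leans on the way the core collapses
+ * all live consumer handles into one boundary pair (a sketch modelled
+ * on clk_core_get_boundaries(), not code from this patch):
+ *
+ *	min_rate = core->min_rate;
+ *	max_rate = core->max_rate;
+ *	hlist_for_each_entry(user, &core->clks, clks_node) {
+ *		min_rate = max(min_rate, user->min_rate);
+ *		max_rate = min(max_rate, user->max_rate);
+ *	}
+ *
+ * so dropping a handle with clk_put() removes its constraint and lets
+ * the rate bounce back.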
+ */
 static struct kunit_suite clk_range_maximize_test_suite = {
 	.name = "clk-range-maximize-test",
 	.init = clk_maximize_test_init,
@@ -785,7 +1960,7 @@ static void clk_range_test_set_range_rate_minimized(struct kunit *test)
 {
 	struct clk_dummy_context *ctx = test->priv;
 	struct clk_hw *hw = &ctx->hw;
-	struct clk *clk = hw->clk;
+	struct clk *clk = clk_hw_get_clk(hw, NULL);
 	unsigned long rate;
 
 	KUNIT_ASSERT_EQ(test,
@@ -821,6 +1996,8 @@ static void clk_range_test_set_range_rate_minimized(struct kunit *test)
 	rate = clk_get_rate(clk);
 	KUNIT_ASSERT_GT(test, rate, 0);
 	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+	clk_put(clk);
 }
 
 /*
@@ -835,7 +2012,7 @@ static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
 {
 	struct clk_dummy_context *ctx = test->priv;
 	struct clk_hw *hw = &ctx->hw;
-	struct clk *clk = hw->clk;
+	struct clk *clk = clk_hw_get_clk(hw, NULL);
 	struct clk *user1, *user2;
 	unsigned long rate;
 
@@ -875,14 +2052,75 @@ static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
 
 	clk_put(user2);
 	clk_put(user1);
+	clk_put(clk);
+}
+
+/*
+ * Test that if we have several subsequent calls to
+ * clk_set_rate_range(), across multiple users, the core will reevaluate
+ * whether a new rate is needed, including when a user drops its clock.
+ *
+ * With clk_dummy_minimize_rate_ops, this means that the rate will
+ * trail along the minimum as it evolves.
+ */
+static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
+{
+	struct clk_dummy_context *ctx = test->priv;
+	struct clk_hw *hw = &ctx->hw;
+	struct clk *clk = clk_hw_get_clk(hw, NULL);
+	struct clk *user1, *user2;
+	unsigned long rate;
+
+	user1 = clk_hw_get_clk(hw, NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
+
+	user2 = clk_hw_get_clk(hw, NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
+
+	KUNIT_ASSERT_EQ(test,
+			clk_set_rate_range(user1,
+					   DUMMY_CLOCK_RATE_1,
+					   ULONG_MAX),
+			0);
+
+	rate = clk_get_rate(clk);
+	KUNIT_ASSERT_GT(test, rate, 0);
+	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+	KUNIT_ASSERT_EQ(test,
+			clk_set_rate_range(user2,
+					   DUMMY_CLOCK_RATE_2,
+					   ULONG_MAX),
+			0);
+
+	rate = clk_get_rate(clk);
+	KUNIT_ASSERT_GT(test, rate, 0);
+	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+	clk_put(user2);
+
+	rate = clk_get_rate(clk);
+	KUNIT_ASSERT_GT(test, rate, 0);
+	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+	clk_put(user1);
+	clk_put(clk);
 }
 
 static struct kunit_case clk_range_minimize_test_cases[] = {
 	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
 	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
+	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
 	{}
 };
 
+/*
+ * Test suite for a basic rate clock, without any parent.
+ *
+ * These tests exercise the rate range API: clk_set_rate_range(),
+ * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
+ * driver that will always try to run at the lowest possible rate.
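+ *
+ * The dummy driver itself is defined earlier in this file, outside this
+ * excerpt; its round_rate presumably just pins requests to the low end
+ * of the allowed range, along the lines of:
+ *
+ *	if (req->min_rate > 0)
+ *		req->rate = req->min_rate;
+ *	return 0;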
+ */ static struct kunit_suite clk_range_minimize_test_suite = { .name = "clk-range-minimize-test", .init = clk_minimize_test_init, @@ -890,11 +2128,284 @@ static struct kunit_suite clk_range_minimize_test_suite = { .test_cases = clk_range_minimize_test_cases, }; +struct clk_leaf_mux_ctx { + struct clk_multiple_parent_ctx mux_ctx; + struct clk_hw hw; +}; + +static int +clk_leaf_mux_set_rate_parent_test_init(struct kunit *test) +{ + struct clk_leaf_mux_ctx *ctx; + const char *top_parents[2] = { "parent-0", "parent-1" }; + int ret; + + ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + test->priv = ctx; + + ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0", + &clk_dummy_rate_ops, + 0); + ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1; + ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw); + if (ret) + return ret; + + ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1", + &clk_dummy_rate_ops, + 0); + ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2; + ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw); + if (ret) + return ret; + + ctx->mux_ctx.current_parent = 0; + ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents, + &clk_multiple_parents_mux_ops, + 0); + ret = clk_hw_register(NULL, &ctx->mux_ctx.hw); + if (ret) + return ret; + + ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->mux_ctx.hw, + &clk_dummy_single_parent_ops, + CLK_SET_RATE_PARENT); + ret = clk_hw_register(NULL, &ctx->hw); + if (ret) + return ret; + + return 0; +} + +static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test) +{ + struct clk_leaf_mux_ctx *ctx = test->priv; + + clk_hw_unregister(&ctx->hw); + clk_hw_unregister(&ctx->mux_ctx.hw); + clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw); + clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw); +} + +/* + * Test that, for a clock that will forward any rate request to its + * parent, the rate request structure returned by __clk_determine_rate + * is sane and will be what we expect. + */ +static void clk_leaf_mux_set_rate_parent_determine_rate(struct kunit *test) +{ + struct clk_leaf_mux_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk_rate_request req; + unsigned long rate; + int ret; + + rate = clk_get_rate(clk); + KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1); + + clk_hw_init_rate_request(hw, &req, DUMMY_CLOCK_RATE_2); + + ret = __clk_determine_rate(hw, &req); + KUNIT_ASSERT_EQ(test, ret, 0); + + KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2); + KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2); + KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw); + + clk_put(clk); +} + +static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = { + KUNIT_CASE(clk_leaf_mux_set_rate_parent_determine_rate), + {} +}; + +/* + * Test suite for a clock whose parent is a mux with multiple parents. + * The leaf clock has CLK_SET_RATE_PARENT, and will forward rate + * requests to the mux, which will then select which parent is the best + * fit for a given rate. + * + * These tests exercise the behaviour of muxes, and the proper selection + * of parents. 
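+ *
+ * A condensed sketch of the forwarding being tested (hedged: it reuses
+ * the clk_hw_forward_rate_request() helper added elsewhere in this
+ * series, and the function name is illustrative):
+ *
+ *	static int leaf_determine_rate(struct clk_hw *hw,
+ *				       struct clk_rate_request *req)
+ *	{
+ *		struct clk_hw *parent = clk_hw_get_parent(hw);
+ *		struct clk_rate_request parent_req;
+ *		int ret;
+ *
+ *		clk_hw_forward_rate_request(hw, req, parent, &parent_req,
+ *					    req->rate);
+ *		ret = __clk_determine_rate(parent, &parent_req);
+ *		if (ret)
+ *			return ret;
+ *
+ *		req->rate = parent_req.rate;
+ *		req->best_parent_rate = parent_req.rate;
+ *		req->best_parent_hw = parent;
+ *		return 0;
+ *	}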
+ */
+static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
+	.name = "clk-leaf-mux-set-rate-parent",
+	.init = clk_leaf_mux_set_rate_parent_test_init,
+	.exit = clk_leaf_mux_set_rate_parent_test_exit,
+	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
+};
+
+struct clk_mux_notifier_rate_change {
+	bool done;
+	unsigned long old_rate;
+	unsigned long new_rate;
+	wait_queue_head_t wq;
+};
+
+struct clk_mux_notifier_ctx {
+	struct clk_multiple_parent_ctx mux_ctx;
+	struct clk *clk;
+	struct notifier_block clk_nb;
+	struct clk_mux_notifier_rate_change pre_rate_change;
+	struct clk_mux_notifier_rate_change post_rate_change;
+};
+
+#define NOTIFIER_TIMEOUT_MS 100
+
+static int clk_mux_notifier_callback(struct notifier_block *nb,
+				     unsigned long action, void *data)
+{
+	struct clk_notifier_data *clk_data = data;
+	struct clk_mux_notifier_ctx *ctx = container_of(nb,
+							struct clk_mux_notifier_ctx,
+							clk_nb);
+
+	if (action & PRE_RATE_CHANGE) {
+		ctx->pre_rate_change.old_rate = clk_data->old_rate;
+		ctx->pre_rate_change.new_rate = clk_data->new_rate;
+		ctx->pre_rate_change.done = true;
+		wake_up_interruptible(&ctx->pre_rate_change.wq);
+	}
+
+	if (action & POST_RATE_CHANGE) {
+		ctx->post_rate_change.old_rate = clk_data->old_rate;
+		ctx->post_rate_change.new_rate = clk_data->new_rate;
+		ctx->post_rate_change.done = true;
+		wake_up_interruptible(&ctx->post_rate_change.wq);
+	}
+
+	return 0;
+}
+
+static int clk_mux_notifier_test_init(struct kunit *test)
+{
+	struct clk_mux_notifier_ctx *ctx;
+	const char *top_parents[2] = { "parent-0", "parent-1" };
+	int ret;
+
+	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+	test->priv = ctx;
+	ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
+	init_waitqueue_head(&ctx->pre_rate_change.wq);
+	init_waitqueue_head(&ctx->post_rate_change.wq);
+
+	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
+								    &clk_dummy_rate_ops,
+								    0);
+	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
+	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
+	if (ret)
+		return ret;
+
+	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
+								    &clk_dummy_rate_ops,
+								    0);
+	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
+	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
+	if (ret)
+		return ret;
+
+	ctx->mux_ctx.current_parent = 0;
+	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
+						   &clk_multiple_parents_mux_ops,
+						   0);
+	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
+	if (ret)
+		return ret;
+
+	ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
+	ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void clk_mux_notifier_test_exit(struct kunit *test)
+{
+	struct clk_mux_notifier_ctx *ctx = test->priv;
+	struct clk *clk = ctx->clk;
+
+	clk_notifier_unregister(clk, &ctx->clk_nb);
+	clk_put(clk);
+
+	clk_hw_unregister(&ctx->mux_ctx.hw);
+	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
+	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
+}
+
+/*
+ * Test that if we have a notifier registered on a mux, the core will
+ * notify us when we switch to another parent, with the proper old and
+ * new rates.
+ */ +static void clk_mux_notifier_set_parent_test(struct kunit *test) +{ + struct clk_mux_notifier_ctx *ctx = test->priv; + struct clk_hw *hw = &ctx->mux_ctx.hw; + struct clk *clk = clk_hw_get_clk(hw, NULL); + struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL); + int ret; + + ret = clk_set_parent(clk, new_parent); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq, + ctx->pre_rate_change.done, + msecs_to_jiffies(NOTIFIER_TIMEOUT_MS)); + KUNIT_ASSERT_GT(test, ret, 0); + + KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1); + KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2); + + ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq, + ctx->post_rate_change.done, + msecs_to_jiffies(NOTIFIER_TIMEOUT_MS)); + KUNIT_ASSERT_GT(test, ret, 0); + + KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1); + KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2); + + clk_put(new_parent); + clk_put(clk); +} + +static struct kunit_case clk_mux_notifier_test_cases[] = { + KUNIT_CASE(clk_mux_notifier_set_parent_test), + {} +}; + +/* + * Test suite for a mux with multiple parents, and a notifier registered + * on the mux. + * + * These tests exercise the behaviour of notifiers. + */ +static struct kunit_suite clk_mux_notifier_test_suite = { + .name = "clk-mux-notifier", + .init = clk_mux_notifier_test_init, + .exit = clk_mux_notifier_test_exit, + .test_cases = clk_mux_notifier_test_cases, +}; + kunit_test_suites( + &clk_leaf_mux_set_rate_parent_test_suite, &clk_test_suite, + &clk_multiple_parents_mux_test_suite, + &clk_mux_notifier_test_suite, + &clk_orphan_transparent_multiple_parent_mux_test_suite, &clk_orphan_transparent_single_parent_test_suite, + &clk_orphan_two_level_root_last_test_suite, &clk_range_test_suite, &clk_range_maximize_test_suite, - &clk_range_minimize_test_suite + &clk_range_minimize_test_suite, + &clk_single_parent_mux_test_suite, + &clk_uncached_test_suite ); MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c index 4421e4859257..ba1720b9e231 100644 --- a/drivers/clk/mediatek/clk-mux.c +++ b/drivers/clk/mediatek/clk-mux.c @@ -129,9 +129,18 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index) return 0; } +static int mtk_clk_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct mtk_clk_mux *mux = to_mtk_clk_mux(hw); + + return clk_mux_determine_rate_flags(hw, req, mux->data->flags); +} + const struct clk_ops mtk_mux_clr_set_upd_ops = { .get_parent = mtk_clk_mux_get_parent, .set_parent = mtk_clk_mux_set_parent_setclr_lock, + .determine_rate = mtk_clk_mux_determine_rate, }; EXPORT_SYMBOL_GPL(mtk_mux_clr_set_upd_ops); @@ -141,6 +150,7 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = { .is_enabled = mtk_clk_mux_is_enabled, .get_parent = mtk_clk_mux_get_parent, .set_parent = mtk_clk_mux_set_parent_setclr_lock, + .determine_rate = mtk_clk_mux_determine_rate, }; EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops); diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 609c10f8d0d9..76551534f10d 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -915,6 +915,15 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw, req->best_parent_hw = p2; } + clk_hw_get_rate_range(req->best_parent_hw, + &parent_req.min_rate, &parent_req.max_rate); + + if (req->min_rate > 
parent_req.min_rate) + parent_req.min_rate = req->min_rate; + + if (req->max_rate < parent_req.max_rate) + parent_req.max_rate = req->max_rate; + ret = __clk_determine_rate(req->best_parent_hw, &parent_req); if (ret) return ret; diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c index 657e1154bb9b..a9eb6a9ac445 100644 --- a/drivers/clk/qcom/gcc-msm8660.c +++ b/drivers/clk/qcom/gcc-msm8660.c @@ -2767,17 +2767,6 @@ MODULE_DEVICE_TABLE(of, gcc_msm8660_match_table); static int gcc_msm8660_probe(struct platform_device *pdev) { - int ret; - struct device *dev = &pdev->dev; - - ret = qcom_cc_register_board_clk(dev, "cxo_board", "cxo", 19200000); - if (ret) - return ret; - - ret = qcom_cc_register_board_clk(dev, "pxo_board", "pxo", 27000000); - if (ret) - return ret; - return qcom_cc_probe(pdev, &gcc_msm8660_desc); } diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c index 41717ff707f6..ba8791303156 100644 --- a/drivers/clk/spear/spear3xx_clock.c +++ b/drivers/clk/spear/spear3xx_clock.c @@ -8,6 +8,7 @@ #include <linux/clk.h> #include <linux/clkdev.h> +#include <linux/clk/spear.h> #include <linux/err.h> #include <linux/io.h> #include <linux/of_platform.h> diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c index 490701ac9e93..c192a9141b86 100644 --- a/drivers/clk/spear/spear6xx_clock.c +++ b/drivers/clk/spear/spear6xx_clock.c @@ -7,6 +7,7 @@ */ #include <linux/clkdev.h> +#include <linux/clk/spear.h> #include <linux/io.h> #include <linux/spinlock_types.h> #include "clk.h" diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index f7405a58877e..73303458e886 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -1166,6 +1166,7 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA114_CLK_I2S3_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 }, { TEGRA114_CLK_I2S4_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 }, { TEGRA114_CLK_VIMCLK_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 }, + { TEGRA114_CLK_PWM, TEGRA114_CLK_PLL_P, 408000000, 0 }, /* must be the last entry */ { TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0 }, }; diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index a9d4efcef2d4..6c46592d794e 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -1330,6 +1330,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = { { TEGRA124_CLK_I2S3_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 }, { TEGRA124_CLK_I2S4_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 }, { TEGRA124_CLK_VIMCLK_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 }, + { TEGRA124_CLK_PWM, TEGRA124_CLK_PLL_P, 408000000, 0 }, /* must be the last entry */ { TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 }, }; diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 8a4514f6d503..422d78247553 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -1044,6 +1044,7 @@ static struct tegra_clk_init_table init_table[] = { { TEGRA20_CLK_GR2D, TEGRA20_CLK_PLL_C, 300000000, 0 }, { TEGRA20_CLK_GR3D, TEGRA20_CLK_PLL_C, 300000000, 0 }, { TEGRA20_CLK_VDE, TEGRA20_CLK_PLL_C, 300000000, 0 }, + { TEGRA20_CLK_PWM, TEGRA20_CLK_PLL_P, 48000000, 0 }, /* must be the last entry */ { TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0 }, }; diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 499f999e91e1..a3488aaac3f7 100644 --- 
a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -3597,6 +3597,7 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, { TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 }, { TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 }, + { TEGRA210_CLK_PWM, TEGRA210_CLK_PLL_P, 48000000, 0 }, /* This MUST be the last entry. */ { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 }, }; diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index 168c07d5a5f2..60f1534711f1 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -1237,6 +1237,7 @@ static struct tegra_clk_init_table init_table[] = { { TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 }, { TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 }, { TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 }, + { TEGRA30_CLK_PWM, TEGRA30_CLK_PLL_P, 48000000, 0 }, /* must be the last entry */ { TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 }, }; diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c index acf31cc1dbcc..97086fab698e 100644 --- a/drivers/dax/hmem/device.c +++ b/drivers/dax/hmem/device.c @@ -48,7 +48,7 @@ void hmem_register_device(int target_nid, struct resource *r) rc = platform_device_add_data(pdev, &info, sizeof(info)); if (rc < 0) { pr_err("hmem memregion_info allocation failure for %pr\n", &res); - goto out_pdev; + goto out_resource; } rc = platform_device_add_resources(pdev, &res, 1); @@ -66,7 +66,7 @@ void hmem_register_device(int target_nid, struct resource *r) return; out_resource: - put_device(&pdev->dev); + platform_device_put(pdev); out_pdev: memregion_free(id); } diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 9b5e2a5eb0ae..da4438f3188c 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -363,7 +363,7 @@ static void dax_free_inode(struct inode *inode) { struct dax_device *dax_dev = to_dax_dev(inode); if (inode->i_rdev) - ida_simple_remove(&dax_minor_ida, iminor(inode)); + ida_free(&dax_minor_ida, iminor(inode)); kmem_cache_free(dax_cache, dax_dev); } @@ -445,7 +445,7 @@ struct dax_device *alloc_dax(void *private, const struct dax_operations *ops) if (WARN_ON_ONCE(ops && !ops->zero_page_range)) return ERR_PTR(-EINVAL); - minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL); + minor = ida_alloc_max(&dax_minor_ida, MINORMASK, GFP_KERNEL); if (minor < 0) return ERR_PTR(-ENOMEM); @@ -459,7 +459,7 @@ struct dax_device *alloc_dax(void *private, const struct dax_operations *ops) return dax_dev; err_dev: - ida_simple_remove(&dax_minor_ida, minor); + ida_free(&dax_minor_ida, minor); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(alloc_dax); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 17562cf1fe97..456602d373b7 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -473,7 +473,7 @@ config EDAC_ALTERA_SDMMC config EDAC_SIFIVE bool "Sifive platform EDAC driver" - depends on EDAC=y && SIFIVE_L2 + depends on EDAC=y && SIFIVE_CCACHE help Support for error detection and correction on the SiFive SoCs. diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c index ee800aec7d47..b844e2626fd5 100644 --- a/drivers/edac/sifive_edac.c +++ b/drivers/edac/sifive_edac.c @@ -2,7 +2,7 @@ /* * SiFive Platform EDAC Driver * - * Copyright (C) 2018-2019 SiFive, Inc. + * Copyright (C) 2018-2022 SiFive, Inc. 
* * This driver is partially based on octeon_edac-pc.c * @@ -10,7 +10,7 @@ #include <linux/edac.h> #include <linux/platform_device.h> #include "edac_module.h" -#include <soc/sifive/sifive_l2_cache.h> +#include <soc/sifive/sifive_ccache.h> #define DRVNAME "sifive_edac" @@ -32,9 +32,9 @@ int ecc_err_event(struct notifier_block *this, unsigned long event, void *ptr) p = container_of(this, struct sifive_edac_priv, notifier); - if (event == SIFIVE_L2_ERR_TYPE_UE) + if (event == SIFIVE_CCACHE_ERR_TYPE_UE) edac_device_handle_ue(p->dci, 0, 0, msg); - else if (event == SIFIVE_L2_ERR_TYPE_CE) + else if (event == SIFIVE_CCACHE_ERR_TYPE_CE) edac_device_handle_ce(p->dci, 0, 0, msg); return NOTIFY_OK; @@ -67,7 +67,7 @@ static int ecc_register(struct platform_device *pdev) goto err; } - register_sifive_l2_error_notifier(&p->notifier); + register_sifive_ccache_error_notifier(&p->notifier); return 0; @@ -81,7 +81,7 @@ static int ecc_unregister(struct platform_device *pdev) { struct sifive_edac_priv *p = platform_get_drvdata(pdev); - unregister_sifive_l2_error_notifier(&p->notifier); + unregister_sifive_ccache_error_notifier(&p->notifier); edac_device_del_device(&pdev->dev); edac_device_free_ctl_info(p->dci); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index c70c026c9a93..2797029bd500 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -223,7 +223,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn) page = pfn_to_page(pfn); svm_range_bo_ref(prange->svm_bo); page->zone_device_data = prange->svm_bo; - lock_page(page); + zone_device_page_init(page); } static void @@ -410,7 +410,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, uint64_t npages = (end - start) >> PAGE_SHIFT; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; - struct migrate_vma migrate; + struct migrate_vma migrate = { 0 }; unsigned long cpages = 0; dma_addr_t *scratch; void *buf; @@ -666,7 +666,7 @@ out_oom: static long svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end, - uint32_t trigger) + uint32_t trigger, struct page *fault_page) { struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); uint64_t npages = (end - start) >> PAGE_SHIFT; @@ -674,7 +674,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, unsigned long cpages = 0; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; - struct migrate_vma migrate; + struct migrate_vma migrate = { 0 }; dma_addr_t *scratch; void *buf; int r = -ENOMEM; @@ -697,6 +697,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, migrate.src = buf; migrate.dst = migrate.src + npages; + migrate.fault_page = fault_page; scratch = (dma_addr_t *)(migrate.dst + npages); kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, @@ -764,7 +765,7 @@ out: * 0 - OK, otherwise error code */ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, - uint32_t trigger) + uint32_t trigger, struct page *fault_page) { struct amdgpu_device *adev; struct vm_area_struct *vma; @@ -805,7 +806,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, } next = min(vma->vm_end, end); - r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger); + r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger, + fault_page); if (r < 
0) { pr_debug("failed %ld to migrate prange %p\n", r, prange); break; @@ -849,7 +851,7 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc); do { - r = svm_migrate_vram_to_ram(prange, mm, trigger); + r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL); if (r) return r; } while (prange->actual_loc && --retries); @@ -950,7 +952,8 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) } r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, - KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU); + KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, + vmf->page); if (r) pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n", r, prange->svms, prange, prange->start, prange->last); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index b3f0754b32fa..a5d7e6d22264 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -43,7 +43,7 @@ enum MIGRATION_COPY_DIR { int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, struct mm_struct *mm, uint32_t trigger); int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, - uint32_t trigger); + uint32_t trigger, struct page *fault_page); unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index f5913ba22174..64fdf63093a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -2913,13 +2913,15 @@ retry_write_locked: */ if (prange->actual_loc) r = svm_migrate_vram_to_ram(prange, mm, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, + NULL); else r = 0; } } else { r = svm_migrate_vram_to_ram(prange, mm, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, + NULL); } if (r) { pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", @@ -3278,7 +3280,8 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange, return 0; if (!best_loc) { - r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PREFETCH); + r = svm_migrate_vram_to_ram(prange, mm, + KFD_MIGRATE_TRIGGER_PREFETCH, NULL); *migrated = !r; return r; } @@ -3339,7 +3342,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) mutex_lock(&prange->migrate_mutex); do { r = svm_migrate_vram_to_ram(prange, mm, - KFD_MIGRATE_TRIGGER_TTM_EVICTION); + KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL); } while (!r && prange->actual_loc && --retries); if (!r && prange->actual_loc) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f6a9e8fdd87d..c053cb79cd06 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -8310,8 +8310,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) crtc, dm_new_crtc_state, cur_crc_src)) DRM_DEBUG_DRIVER("Failed to configure crc source"); } -#endif } +#endif } for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 16356611b5b9..5fe209107246 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -139,44 +139,24 @@ static void nouveau_dmem_fence_done(struct nouveau_fence **fence) } } -static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm 
*drm, - struct vm_fault *vmf, struct migrate_vma *args, - dma_addr_t *dma_addr) +static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage, + struct page *dpage, dma_addr_t *dma_addr) { struct device *dev = drm->dev->dev; - struct page *dpage, *spage; - struct nouveau_svmm *svmm; - - spage = migrate_pfn_to_page(args->src[0]); - if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE)) - return 0; - dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address); - if (!dpage) - return VM_FAULT_SIGBUS; lock_page(dpage); *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, *dma_addr)) - goto error_free_page; + return -EIO; - svmm = spage->zone_device_data; - mutex_lock(&svmm->mutex); - nouveau_svmm_invalidate(svmm, args->start, args->end); if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr, - NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) - goto error_dma_unmap; - mutex_unlock(&svmm->mutex); + NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) { + dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + return -EIO; + } - args->dst[0] = migrate_pfn(page_to_pfn(dpage)); return 0; - -error_dma_unmap: - mutex_unlock(&svmm->mutex); - dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); -error_free_page: - __free_page(dpage); - return VM_FAULT_SIGBUS; } static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) @@ -184,9 +164,11 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) struct nouveau_drm *drm = page_to_drm(vmf->page); struct nouveau_dmem *dmem = drm->dmem; struct nouveau_fence *fence; + struct nouveau_svmm *svmm; + struct page *spage, *dpage; unsigned long src = 0, dst = 0; dma_addr_t dma_addr = 0; - vm_fault_t ret; + vm_fault_t ret = 0; struct migrate_vma args = { .vma = vmf->vma, .start = vmf->address, @@ -207,9 +189,25 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) if (!args.cpages) return 0; - ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr); - if (ret || dst == 0) + spage = migrate_pfn_to_page(src); + if (!spage || !(src & MIGRATE_PFN_MIGRATE)) + goto done; + + dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address); + if (!dpage) + goto done; + + dst = migrate_pfn(page_to_pfn(dpage)); + + svmm = spage->zone_device_data; + mutex_lock(&svmm->mutex); + nouveau_svmm_invalidate(svmm, args.start, args.end); + ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr); + mutex_unlock(&svmm->mutex); + if (ret) { + ret = VM_FAULT_SIGBUS; goto done; + } nouveau_fence_new(dmem->migrate.chan, false, &fence); migrate_vma_pages(&args); @@ -326,7 +324,7 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm) return NULL; } - lock_page(page); + zone_device_page_init(page); return page; } @@ -369,6 +367,52 @@ nouveau_dmem_suspend(struct nouveau_drm *drm) mutex_unlock(&drm->dmem->mutex); } +/* + * Evict all pages mapping a chunk. 
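+ *
+ * In outline (describing the function below): snapshot the chunk's
+ * device-private PFNs with migrate_device_range(), copy each page that
+ * is still migratable back into freshly allocated system RAM (with
+ * __GFP_NOFAIL, since the data is unrecoverable once the GPU goes
+ * away), then migrate_device_pages() and migrate_device_finalize()
+ * repoint any remaining references before the chunk is torn down.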
+ */ +static void +nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk) +{ + unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT; + unsigned long *src_pfns, *dst_pfns; + dma_addr_t *dma_addrs; + struct nouveau_fence *fence; + + src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL); + dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL); + dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL); + + migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT, + npages); + + for (i = 0; i < npages; i++) { + if (src_pfns[i] & MIGRATE_PFN_MIGRATE) { + struct page *dpage; + + /* + * _GFP_NOFAIL because the GPU is going away and there + * is nothing sensible we can do if we can't copy the + * data back. + */ + dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL); + dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)); + nouveau_dmem_copy_one(chunk->drm, + migrate_pfn_to_page(src_pfns[i]), dpage, + &dma_addrs[i]); + } + } + + nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence); + migrate_device_pages(src_pfns, dst_pfns, npages); + nouveau_dmem_fence_done(&fence); + migrate_device_finalize(src_pfns, dst_pfns, npages); + kfree(src_pfns); + kfree(dst_pfns); + for (i = 0; i < npages; i++) + dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL); + kfree(dma_addrs); +} + void nouveau_dmem_fini(struct nouveau_drm *drm) { @@ -380,8 +424,10 @@ nouveau_dmem_fini(struct nouveau_drm *drm) mutex_lock(&drm->dmem->mutex); list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) { + nouveau_dmem_evict_chunk(chunk); nouveau_bo_unpin(chunk->bo); nouveau_bo_ref(NULL, &chunk->bo); + WARN_ON(chunk->callocated); list_del(&chunk->list); memunmap_pages(&chunk->pagemap); release_mem_region(chunk->pagemap.range.start, diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 7850287dfe7a..351c81a929a6 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -1379,6 +1379,9 @@ static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr, I3C_ADDR_SLOT_I3C_DEV); + if (old_dyn_addr) + i3c_bus_set_addr_slot_status(&master->bus, old_dyn_addr, + I3C_ADDR_SLOT_FREE); } if (master->ops->reattach_i3c_dev) { @@ -1908,10 +1911,6 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, i3c_master_free_i3c_dev(olddev); } - ret = i3c_master_reattach_i3c_dev(newdev, old_dyn_addr); - if (ret) - goto err_detach_dev; - /* * Depending on our previous state, the expected dynamic address might * differ: diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c index 00aecd67e348..a7e052c1db53 100644 --- a/drivers/leds/leds-pca963x.c +++ b/drivers/leds/leds-pca963x.c @@ -101,6 +101,7 @@ struct pca963x_led { struct pca963x *chip; struct led_classdev led_cdev; int led_num; /* 0 .. 
15 potentially */
+	bool blinking;
 	u8 gdc;
 	u8 gfrq;
 };
@@ -129,12 +130,21 @@ static int pca963x_brightness(struct pca963x_led *led,
 
 	switch (brightness) {
 	case LED_FULL:
-		val = (ledout & ~mask) | (PCA963X_LED_ON << shift);
+		if (led->blinking) {
+			val = (ledout & ~mask) | (PCA963X_LED_GRP_PWM << shift);
+			ret = i2c_smbus_write_byte_data(client,
+							PCA963X_PWM_BASE +
+							led->led_num,
+							LED_FULL);
+		} else {
+			val = (ledout & ~mask) | (PCA963X_LED_ON << shift);
+		}
 		ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
 		break;
 	case LED_OFF:
 		val = ledout & ~mask;
 		ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
+		led->blinking = false;
 		break;
 	default:
 		ret = i2c_smbus_write_byte_data(client,
@@ -144,7 +154,11 @@ static int pca963x_brightness(struct pca963x_led *led,
 		if (ret < 0)
 			return ret;
-		val = (ledout & ~mask) | (PCA963X_LED_PWM << shift);
+		if (led->blinking)
+			val = (ledout & ~mask) | (PCA963X_LED_GRP_PWM << shift);
+		else
+			val = (ledout & ~mask) | (PCA963X_LED_PWM << shift);
+
 		ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
 		break;
 	}
@@ -181,6 +195,7 @@ static void pca963x_blink(struct pca963x_led *led)
 	}
 	mutex_unlock(&led->chip->mutex);
+	led->blinking = true;
 }
 
 static int pca963x_power_state(struct pca963x_led *led)
@@ -275,6 +290,8 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
 	led->gfrq = gfrq;
 
 	pca963x_blink(led);
+	led->led_cdev.brightness = LED_FULL;
+	pca963x_led_set(led_cdev, LED_FULL);
 
 	*delay_on = time_on;
 	*delay_off = time_off;
@@ -337,6 +354,7 @@ static int pca963x_register_leds(struct i2c_client *client,
 		led->led_cdev.brightness_set_blocking = pca963x_led_set;
 		if (hw_blink)
 			led->led_cdev.blink_set = pca963x_blink_set;
+		led->blinking = false;
 
 		init_data.fwnode = child;
 		/* for backwards compatibility */
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 4cf67a2a0d04..75eaecc8639f 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -409,7 +409,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
 	ret = blk_mq_alloc_tag_set(&dev->tag_set);
 	if (ret) {
 		dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
-		goto out_free_dev;;
+		goto out_free_dev;
 	}
 
@@ -441,7 +441,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
 
 	/*
 	 * Create one workqueue per volume (per registered block device).
-	 * Rembember workqueues are cheap, they're not threads.
+	 * Remember workqueues are cheap, they're not threads.
 	 */
 	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
 	if (!dev->wq) {
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a32050fecabf..a901f8edfa41 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -807,6 +807,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
  * @ubi_num: number to assign to the new UBI device
  * @vid_hdr_offset: VID header offset
  * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
+ * @disable_fm: whether to disable fastmap
  *
  * This function attaches MTD device @mtd_dev to UBI and assigns @ubi_num number
  * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -814,11 +815,15 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
  * automatically. Returns the new UBI device number in case of success and a
  * negative error code in case of failure.
  *
+ * If @disable_fm is true, ubi doesn't create a new fastmap even if the
+ * module param 'fm_autoconvert' is set, and the existing old fastmap will
+ * be destroyed after a full scan is done.
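+ *
+ * For illustration, a user-space attach with fastmap disabled could look
+ * like this (a sketch; @disable_fm is the ubi_attach_req field that
+ * ctrl_cdev_ioctl() below now passes through):
+ *
+ *	struct ubi_attach_req req = {
+ *		.ubi_num = UBI_DEV_NUM_AUTO,
+ *		.mtd_num = mtd_num,
+ *		.disable_fm = 1,
+ *	};
+ *	ioctl(ctrl_fd, UBI_IOCATT, &req);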
+ * * Note, the invocations of this function has to be serialized by the * @ubi_devices_mutex. */ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, - int vid_hdr_offset, int max_beb_per1024) + int vid_hdr_offset, int max_beb_per1024, bool disable_fm) { struct ubi_device *ubi; int i, err; @@ -921,7 +926,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, UBI_FM_MIN_POOL_SIZE); ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2; - ubi->fm_disabled = !fm_autoconvert; + ubi->fm_disabled = (!fm_autoconvert || disable_fm) ? 1 : 0; if (fm_debug) ubi_enable_dbg_chk_fastmap(ubi); @@ -962,7 +967,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, if (!ubi->fm_buf) goto out_free; #endif - err = ubi_attach(ubi, 0); + err = ubi_attach(ubi, disable_fm ? 1 : 0); if (err) { ubi_err(ubi, "failed to attach mtd%d, error %d", mtd->index, err); @@ -1242,7 +1247,8 @@ static int __init ubi_init(void) mutex_lock(&ubi_devices_mutex); err = ubi_attach_mtd_dev(mtd, p->ubi_num, - p->vid_hdr_offs, p->max_beb_per1024); + p->vid_hdr_offs, p->max_beb_per1024, + false); mutex_unlock(&ubi_devices_mutex); if (err < 0) { pr_err("UBI error: cannot attach mtd%d\n", diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index cc9a28cf9d82..f43430b9c1e6 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -672,7 +672,7 @@ static int verify_rsvol_req(const struct ubi_device *ubi, * @req: volumes re-name request * * This is a helper function for the volume re-name IOCTL which validates the - * the request, opens the volume and calls corresponding volumes management + * request, opens the volume and calls corresponding volumes management * function. Returns zero in case of success and a negative error code in case * of failure. */ @@ -1041,7 +1041,7 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd, */ mutex_lock(&ubi_devices_mutex); err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset, - req.max_beb_per1024); + req.max_beb_per1024, !!req.disable_fm); mutex_unlock(&ubi_devices_mutex); if (err < 0) put_mtd_device(mtd); diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index ccc5979642b7..09c408c45a62 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -377,7 +377,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) * * This function locks a logical eraseblock for writing if there is no * contention and does nothing if there is contention. Returns %0 in case of - * success, %1 in case of contention, and and a negative error code in case of + * success, %1 in case of contention, and a negative error code in case of * failure. 
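 *
 * A typical caller handles the three-way return roughly like this
 * (a sketch, not code from this file):
 *
 *	err = leb_write_trylock(ubi, vol_id, lnum);
 *	if (err < 0)
 *		return err;
 *	if (err == 1)
 *		return 0;
 *
 * i.e. bail out on a hard failure, skip the write on contention, and
 * otherwise proceed and release the lock with leb_write_unlock().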
*/ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 6e95c4b1473e..ca2d9efe62c3 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -20,8 +20,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi) if (!ubi_dbg_chk_fastmap(ubi)) return NULL; - ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long), - GFP_KERNEL); + ret = bitmap_zalloc(ubi->peb_count, GFP_KERNEL); if (!ret) return ERR_PTR(-ENOMEM); @@ -34,7 +33,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi) */ static inline void free_seen(unsigned long *seen) { - kfree(seen); + bitmap_free(seen); } /** @@ -1108,8 +1107,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) if (!ubi->fast_attach) return 0; - vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long), - GFP_KERNEL); + vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL); if (!vol->checkmap) return -ENOMEM; @@ -1118,7 +1116,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) { - kfree(vol->checkmap); + bitmap_free(vol->checkmap); } /** diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 8a7306cc1947..01b644861253 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -1147,7 +1147,7 @@ fail: * @ubi: UBI device description object * @pnum: the physical eraseblock number to check * - * This function returns zero if the erase counter header is all right and and + * This function returns zero if the erase counter header is all right and * a negative error code if not or if an error occurred. */ static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum) diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h index 386db0598e95..2c9cd3b6434f 100644 --- a/drivers/mtd/ubi/ubi-media.h +++ b/drivers/mtd/ubi/ubi-media.h @@ -131,7 +131,7 @@ enum { * is changed radically. This field is duplicated in the volume identifier * header. * - * The @vid_hdr_offset and @data_offset fields contain the offset of the the + * The @vid_hdr_offset and @data_offset fields contain the offset of the * volume identifier header and user data, relative to the beginning of the * physical eraseblock. These values have to be the same for all physical * eraseblocks. diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 078112e23dfd..c8f1bd4fa100 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -86,7 +86,7 @@ void ubi_err(const struct ubi_device *ubi, const char *fmt, ...); * Error codes returned by the I/O sub-system. * * UBI_IO_FF: the read region of flash contains only 0xFFs - * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also also there was a data + * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data * integrity error reported by the MTD driver * (uncorrectable ECC error in case of NAND) * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC) @@ -281,7 +281,7 @@ struct ubi_eba_leb_desc { /** * struct ubi_volume - UBI volume description data structure. 
- * @dev: device object to make use of the the Linux device model + * @dev: device object to make use of the Linux device model * @cdev: character device object to create character device * @ubi: reference to the UBI device description object * @vol_id: volume ID @@ -439,7 +439,7 @@ struct ubi_debug_info { /** * struct ubi_device - UBI device description structure - * @dev: UBI device object to use the the Linux device model + * @dev: UBI device object to use the Linux device model * @cdev: character device object to create character device * @ubi_num: UBI device number * @ubi_name: UBI device name @@ -937,7 +937,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, /* build.c */ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, - int vid_hdr_offset, int max_beb_per1024); + int vid_hdr_offset, int max_beb_per1024, + bool disable_fm); int ubi_detach_mtd_dev(int ubi_num, int anyway); struct ubi_device *ubi_get_device(int ubi_num); void ubi_put_device(struct ubi_device *ubi); diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 6ea95ade4ca6..8fcc0bdf0635 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -623,7 +623,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) * @ubi: UBI device description object * @vol_id: volume ID * - * Returns zero if volume is all right and a a negative error code if not. + * Returns zero if volume is all right and a negative error code if not. */ static int self_check_volume(struct ubi_device *ubi, int vol_id) { @@ -776,7 +776,7 @@ fail: * self_check_volumes - check information about all volumes. * @ubi: UBI device description object * - * Returns zero if volumes are all right and a a negative error code if not. + * Returns zero if volumes are all right and a negative error code if not. */ static int self_check_volumes(struct ubi_device *ubi) { diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 55bae06cf408..68eb0f21b3fe 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -376,7 +376,7 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, * refill_wl_user_pool(). * @ubi: UBI device description object * - * This function returns a a wear leveling entry in case of success and + * This function returns a wear leveling entry in case of success and * NULL in case of failure. */ static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi) @@ -429,7 +429,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum) /** * sync_erase - synchronously erase a physical eraseblock. * @ubi: UBI device description object - * @e: the the physical eraseblock to erase + * @e: the physical eraseblock to erase * @torture: if the physical eraseblock has to be tortured * * This function returns zero in case of success and a negative error code in @@ -1016,7 +1016,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested) /* * If the ubi->scrub tree is not empty, scrubbing is needed, and the - * the WL worker has to be scheduled anyway. + * WL worker has to be scheduled anyway. */ if (!ubi->scrub.rb_node) { #ifdef CONFIG_MTD_UBI_FASTMAP @@ -1464,7 +1464,7 @@ static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e) * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed. * @ubi: UBI device description object * @pnum: the physical eraseblock to schedule - * @force: dont't read the block, assume bitflips happened and take action. + * @force: don't read the block, assume bitflips happened and take action. 
* * This function reads the given eraseblock and checks if bitflips occured. * In case of bitflips, the eraseblock is scheduled for scrubbing. diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index bbe5099c836d..c60ec0b373c5 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -170,15 +170,12 @@ EXPORT_SYMBOL(nvdimm_namespace_disk_name); const uuid_t *nd_dev_to_uuid(struct device *dev) { - if (!dev) - return &uuid_null; - - if (is_namespace_pmem(dev)) { + if (dev && is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); return nspm->uuid; - } else - return &uuid_null; + } + return &uuid_null; } EXPORT_SYMBOL(nd_dev_to_uuid); @@ -388,7 +385,7 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id, * * BLK-space is valid as long as it does not precede a PMEM * allocation in a given region. PMEM-space must be contiguous - * and adjacent to an existing existing allocation (if one + * and adjacent to an existing allocation (if one * exists). If reserving PMEM any space is valid. */ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, @@ -839,7 +836,6 @@ static ssize_t size_store(struct device *dev, { struct nd_region *nd_region = to_nd_region(dev->parent); unsigned long long val; - uuid_t **uuid = NULL; int rc; rc = kstrtoull(buf, 0, &val); @@ -853,16 +849,12 @@ static ssize_t size_store(struct device *dev, if (rc >= 0) rc = nd_namespace_label_update(nd_region, dev); - if (is_namespace_pmem(dev)) { + /* setting size zero == 'delete namespace' */ + if (rc == 0 && val == 0 && is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); - uuid = &nspm->uuid; - } - - if (rc == 0 && val == 0 && uuid) { - /* setting size zero == 'delete namespace' */ - kfree(*uuid); - *uuid = NULL; + kfree(nspm->uuid); + nspm->uuid = NULL; } dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 473a71bbd9c9..e0875d369762 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -509,16 +509,13 @@ static ssize_t align_store(struct device *dev, { struct nd_region *nd_region = to_nd_region(dev); unsigned long val, dpa; - u32 remainder; + u32 mappings, remainder; int rc; rc = kstrtoul(buf, 0, &val); if (rc) return rc; - if (!nd_region->ndr_mappings) - return -ENXIO; - /* * Ensure space-align is evenly divisible by the region * interleave-width because the kernel typically has no facility @@ -526,7 +523,8 @@ static ssize_t align_store(struct device *dev, * contribute to the tail capacity in system-physical-address * space for the namespace. 
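 *
 * Worked example: with four interleaved mappings, an align of 16M
 * passes (16M / 4 = 4M, a power of two no smaller than PAGE_SIZE, with
 * no remainder), while 24M is rejected because 24M / 4 = 6M is not a
 * power of two. Clamping mappings to at least 1 below also keeps a
 * region with no mappings from dividing by zero.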
*/ - dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder); + mappings = max_t(u32, 1, nd_region->ndr_mappings); + dpa = div_u64_rem(val, mappings, &remainder); if (!is_power_of_2(dpa) || dpa < PAGE_SIZE || val > region_size(nd_region) || remainder) return -EINVAL; @@ -1096,7 +1094,7 @@ int nvdimm_flush(struct nd_region *nd_region, struct bio *bio) return rc; } /** - * nvdimm_flush - flush any posted write queues between the cpu and pmem media + * generic_nvdimm_flush() - flush any posted write queues between the cpu and pmem media * @nd_region: interleaved pmem region */ int generic_nvdimm_flush(struct nd_region *nd_region) diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index b5aa55c61461..8aefb60c42ff 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -408,7 +408,7 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid) return rc; } -void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm) +static void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm) { struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev); int rc; diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c index f54a6f450391..f0cb31198a8f 100644 --- a/drivers/parisc/eisa_enumerator.c +++ b/drivers/parisc/eisa_enumerator.c @@ -393,7 +393,7 @@ static int parse_slot_config(int slot, } if (p0 + function_len < pos) { - printk(KERN_ERR "eisa_enumerator: function %d length mis-match " + printk(KERN_ERR "eisa_enumerator: function %d length mismatch " "got %d, expected %d\n", num_func, pos-p0, function_len); res=-1; @@ -407,13 +407,13 @@ static int parse_slot_config(int slot, } if (pos != es->config_data_length) { - printk(KERN_ERR "eisa_enumerator: config data length mis-match got %d, expected %d\n", + printk(KERN_ERR "eisa_enumerator: config data length mismatch got %d, expected %d\n", pos, es->config_data_length); res=-1; } if (num_func != es->num_functions) { - printk(KERN_ERR "eisa_enumerator: number of functions mis-match got %d, expected %d\n", + printk(KERN_ERR "eisa_enumerator: number of functions mismatch got %d, expected %d\n", num_func, es->num_functions); res=-2; } @@ -451,7 +451,7 @@ static int init_slot(int slot, struct eeprom_eisa_slot_info *es) } if (es->eisa_slot_id != id) { print_eisa_id(id_string, id); - printk(KERN_ERR "EISA slot %d id mis-match: got %s", + printk(KERN_ERR "EISA slot %d id mismatch: got %s", slot, id_string); print_eisa_id(id_string, es->eisa_slot_id); diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index dc6a30ee6edf..b4096598dbcb 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1768,10 +1768,7 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res, } res->end = res->start + new_size - 1; - - /* If the resource is part of the add_list remove it now */ - if (add_list) - remove_from_list(add_list, res); + remove_from_list(add_list, res); } static void pci_bus_distribute_available_resources(struct pci_bus *bus, @@ -1926,8 +1923,6 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge, if (!bridge->is_hotplug_bridge) return; - pci_dbg(bridge, "distributing available resources\n"); - /* Take the initial extra resources from the hotplug port */ available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW]; available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW]; @@ -1939,59 +1934,6 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge, available_mmio_pref); } -static bool 
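Back at the start of this hunk, align_store() trades the early -ENXIO for a clamped divisor, so a region with ndr_mappings == 0 still goes through the normal validation. A reduced sketch using the same helpers the driver uses:

	/* Divide by at least 1: zero mappings no longer short-circuits
	 * the store, the value is simply validated as-is.
	 */
	u32 mappings = max_t(u32, 1, nd_region->ndr_mappings);
	u32 remainder;
	u64 dpa = div_u64_rem(val, mappings, &remainder);

	if (!is_power_of_2(dpa) || dpa < PAGE_SIZE ||
	    val > region_size(nd_region) || remainder)
		return -EINVAL;

For example, writing 2 MiB with ndr_mappings == 0 used to fail with -ENXIO unconditionally; with the clamp the division uses 1, dpa equals the full value, and the power-of-two, page-size, and region-size checks decide the outcome.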
pci_bridge_resources_not_assigned(struct pci_dev *dev) -{ - const struct resource *r; - - /* - * Check the child device's resources and if they are not yet - * assigned it means we are configuring them (not the boot - * firmware) so we should be able to extend the upstream - * bridge's (that's the hotplug downstream PCIe port) resources - * in the same way we do with the normal hotplug case. - */ - r = &dev->resource[PCI_BRIDGE_IO_WINDOW]; - if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN)) - return false; - r = &dev->resource[PCI_BRIDGE_MEM_WINDOW]; - if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN)) - return false; - r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; - if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN)) - return false; - - return true; -} - -static void pci_root_bus_distribute_available_resources(struct pci_bus *bus, - struct list_head *add_list) -{ - struct pci_dev *dev, *bridge = bus->self; - - for_each_pci_bridge(dev, bus) { - struct pci_bus *b; - - b = dev->subordinate; - if (!b) - continue; - - /* - * Need to check "bridge" here too because it is NULL - * in case of root bus. - */ - if (bridge && pci_bridge_resources_not_assigned(dev)) { - pci_bridge_distribute_available_resources(bridge, add_list); - /* - * There is only PCIe upstream port on the bus - * so we don't need to go futher. - */ - return; - } - - pci_root_bus_distribute_available_resources(b, add_list); - } -} - /* * First try will not touch PCI bridge res. * Second and later try will clear small leaf bridge res. @@ -2031,8 +1973,6 @@ again: */ __pci_bus_size_bridges(bus, add_list); - pci_root_bus_distribute_available_resources(bus, add_list); - /* Depth last, allocate resources and update the hardware. */ __pci_bus_assign_resources(bus, add_list, &fail_head); if (add_list) diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 44c07ea487f4..341010f20b77 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -185,7 +185,7 @@ config APPLE_M1_CPU_PMU config ALIBABA_UNCORE_DRW_PMU tristate "Alibaba T-Head Yitian 710 DDR Sub-system Driveway PMU driver" - depends on ARM64 || COMPILE_TEST + depends on (ARM64 && ACPI) || COMPILE_TEST help Support for Driveway PMU events monitoring on Yitian 710 DDR Sub-system. diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c index 82729b874f09..a7689fecb49d 100644 --- a/drivers/perf/alibaba_uncore_drw_pmu.c +++ b/drivers/perf/alibaba_uncore_drw_pmu.c @@ -658,8 +658,8 @@ static int ali_drw_pmu_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); drw_pmu->cfg_base = devm_ioremap_resource(&pdev->dev, res); - if (!drw_pmu->cfg_base) - return -ENOMEM; + if (IS_ERR(drw_pmu->cfg_base)) + return PTR_ERR(drw_pmu->cfg_base); name = devm_kasprintf(drw_pmu->dev, GFP_KERNEL, "ali_drw_%llx", (u64) (res->start >> ALI_DRW_PMU_PA_SHIFT)); diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 15e5a47be7d5..3852c18362f5 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -652,8 +652,11 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node); struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); - /* Enable the access for TIME csr only from the user mode now */ - csr_write(CSR_SCOUNTEREN, 0x2); + /* + * Enable the access for CYCLE, TIME, and INSTRET CSRs from userspace, + * as is necessary to maintain uABI compatibility. 
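The alibaba_uncore_drw_pmu fix in this hunk corrects a classic probe bug: devm_ioremap_resource() never returns NULL, it encodes failure as an ERR_PTR(), so a '!ptr' test silently accepts every failed mapping. A minimal sketch of the correct pattern:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* Failure comes back as ERR_PTR(-EBUSY/-EINVAL/...), never
	 * NULL, so IS_ERR()/PTR_ERR() is the only valid check.
	 */
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}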
+ */ + csr_write(CSR_SCOUNTEREN, 0x7); /* Stop all the counters so that they can be enabled from perf */ pmu_sbi_stop_all(pmu); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index b8de25118ad0..bb63edb507da 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -423,6 +423,7 @@ config RTC_DRV_ISL1208 config RTC_DRV_ISL12022 tristate "Intersil ISL12022" + select REGMAP_I2C help If you say yes here you get support for the Intersil ISL12022 RTC chip. diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index bdb1df843c78..610413b4e9ca 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -1352,10 +1352,10 @@ static void cmos_check_acpi_rtc_status(struct device *dev, static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) { - cmos_wake_setup(&pnp->dev); + int irq, ret; if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) { - unsigned int irq = 0; + irq = 0; #ifdef CONFIG_X86 /* Some machines contain a PNP entry for the RTC, but * don't define the IRQ. It should always be safe to @@ -1364,13 +1364,17 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) if (nr_legacy_irqs()) irq = RTC_IRQ; #endif - return cmos_do_probe(&pnp->dev, - pnp_get_resource(pnp, IORESOURCE_IO, 0), irq); } else { - return cmos_do_probe(&pnp->dev, - pnp_get_resource(pnp, IORESOURCE_IO, 0), - pnp_irq(pnp, 0)); + irq = pnp_irq(pnp, 0); } + + ret = cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq); + if (ret) + return ret; + + cmos_wake_setup(&pnp->dev); + + return 0; } static void cmos_pnp_remove(struct pnp_dev *pnp) @@ -1454,10 +1458,9 @@ static inline void cmos_of_init(struct platform_device *pdev) {} static int __init cmos_platform_probe(struct platform_device *pdev) { struct resource *resource; - int irq; + int irq, ret; cmos_of_init(pdev); - cmos_wake_setup(&pdev->dev); if (RTC_IOMAPPED) resource = platform_get_resource(pdev, IORESOURCE_IO, 0); @@ -1467,7 +1470,13 @@ static int __init cmos_platform_probe(struct platform_device *pdev) if (irq < 0) irq = -1; - return cmos_do_probe(&pdev->dev, resource, irq); + ret = cmos_do_probe(&pdev->dev, resource, irq); + if (ret) + return ret; + + cmos_wake_setup(&pdev->dev); + + return 0; } static int cmos_platform_remove(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c index a24331ba8a5f..5db9c737c022 100644 --- a/drivers/rtc/rtc-ds1685.c +++ b/drivers/rtc/rtc-ds1685.c @@ -132,7 +132,7 @@ ds1685_rtc_bin2bcd(struct ds1685_priv *rtc, u8 val, u8 bin_mask, u8 bcd_mask) } /** - * s1685_rtc_check_mday - check validity of the day of month. + * ds1685_rtc_check_mday - check validity of the day of month. * @rtc: pointer to the ds1685 rtc structure. * @mday: day of month. * diff --git a/drivers/rtc/rtc-gamecube.c b/drivers/rtc/rtc-gamecube.c index c2717bb52b2b..c828bc8e05b9 100644 --- a/drivers/rtc/rtc-gamecube.c +++ b/drivers/rtc/rtc-gamecube.c @@ -265,18 +265,17 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d) * SRAM address as on previous consoles. */ ret = regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias); - if (ret) { - pr_err("failed to get the RTC bias\n"); - iounmap(hw_srnprot); - return -1; - } /* Reset SRAM access to how it was before, our job here is done. 
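On the riscv_pmu_sbi change at the top of this hunk: in the RISC-V privileged spec, scounteren bit 0 (CY) gates the cycle CSR, bit 1 (TM) gates time, and bit 2 (IR) gates instret for user mode, so the old 0x2 exposed only time while 0x7 restores all three counters the uABI has historically guaranteed. A sketch that spells the mask out; the macro names are illustrative, not from the driver:

#include <asm/csr.h>
#include <linux/bits.h>

#define SCOUNTEREN_CY	BIT(0)	/* cycle   CSR readable from U-mode */
#define SCOUNTEREN_TM	BIT(1)	/* time    CSR readable from U-mode */
#define SCOUNTEREN_IR	BIT(2)	/* instret CSR readable from U-mode */

static void example_enable_user_counters(void)
{
	/* Equivalent to the 0x7 written above. */
	csr_write(CSR_SCOUNTEREN,
		  SCOUNTEREN_CY | SCOUNTEREN_TM | SCOUNTEREN_IR);
}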
*/ if (old != 0x7bf) iowrite32be(old, hw_srnprot); + iounmap(hw_srnprot); - return 0; + if (ret) + pr_err("failed to get the RTC bias\n"); + + return ret; } static const struct regmap_range rtc_rd_ranges[] = { diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c index 79461ded1a48..ca677c4265e6 100644 --- a/drivers/rtc/rtc-isl12022.c +++ b/drivers/rtc/rtc-isl12022.c @@ -16,6 +16,7 @@ #include <linux/err.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/regmap.h> /* ISL register offsets */ #define ISL12022_REG_SC 0x00 @@ -42,83 +43,32 @@ static struct i2c_driver isl12022_driver; struct isl12022 { struct rtc_device *rtc; - - bool write_enabled; /* true if write enable is set */ + struct regmap *regmap; }; - -static int isl12022_read_regs(struct i2c_client *client, uint8_t reg, - uint8_t *data, size_t n) -{ - struct i2c_msg msgs[] = { - { - .addr = client->addr, - .flags = 0, - .len = 1, - .buf = data - }, /* setup read ptr */ - { - .addr = client->addr, - .flags = I2C_M_RD, - .len = n, - .buf = data - } - }; - - int ret; - - data[0] = reg; - ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (ret != ARRAY_SIZE(msgs)) { - dev_err(&client->dev, "%s: read error, ret=%d\n", - __func__, ret); - return -EIO; - } - - return 0; -} - - -static int isl12022_write_reg(struct i2c_client *client, - uint8_t reg, uint8_t val) -{ - uint8_t data[2] = { reg, val }; - int err; - - err = i2c_master_send(client, data, sizeof(data)); - if (err != sizeof(data)) { - dev_err(&client->dev, - "%s: err=%d addr=%02x, data=%02x\n", - __func__, err, data[0], data[1]); - return -EIO; - } - - return 0; -} - - /* * In the routines that deal directly with the isl12022 hardware, we use * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. */ static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm) { - struct i2c_client *client = to_i2c_client(dev); + struct isl12022 *isl12022 = dev_get_drvdata(dev); + struct regmap *regmap = isl12022->regmap; uint8_t buf[ISL12022_REG_INT + 1]; int ret; - ret = isl12022_read_regs(client, ISL12022_REG_SC, buf, sizeof(buf)); + ret = regmap_bulk_read(regmap, ISL12022_REG_SC, buf, sizeof(buf)); if (ret) return ret; if (buf[ISL12022_REG_SR] & (ISL12022_SR_LBAT85 | ISL12022_SR_LBAT75)) { - dev_warn(&client->dev, + dev_warn(dev, "voltage dropped below %u%%, " "date and time is not reliable.\n", buf[ISL12022_REG_SR] & ISL12022_SR_LBAT85 ? 
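The rtc-gamecube rework just above is an unwind fix: the old error path returned before restoring the SRAM protection register, and the mapping was never released even on success. The new shape funnels both outcomes through one restore-and-unmap tail. A condensed sketch of that structure; EXAMPLE_BASE, EXAMPLE_SIZE, and read_hw() are placeholders:

static int example_read(struct priv *d)
{
	void __iomem *regs;
	u32 old;
	int ret;

	regs = ioremap(EXAMPLE_BASE, EXAMPLE_SIZE);
	if (!regs)
		return -ENOMEM;

	old = ioread32be(regs);
	iowrite32be(0x7bf, regs);	/* temporarily widen access */

	ret = read_hw(d);		/* may fail; fall through */

	/* Single exit path: restore and unmap on success and failure
	 * alike, then report ret.
	 */
	iowrite32be(old, regs);
	iounmap(regs);
	return ret;
}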
85 : 75); } - dev_dbg(&client->dev, + dev_dbg(dev, "%s: raw data is sec=%02x, min=%02x, hr=%02x, " "mday=%02x, mon=%02x, year=%02x, wday=%02x, " "sr=%02x, int=%02x", @@ -141,65 +91,25 @@ static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_mon = bcd2bin(buf[ISL12022_REG_MO] & 0x1F) - 1; tm->tm_year = bcd2bin(buf[ISL12022_REG_YR]) + 100; - dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " - "mday=%d, mon=%d, year=%d, wday=%d\n", - __func__, - tm->tm_sec, tm->tm_min, tm->tm_hour, - tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + dev_dbg(dev, "%s: %ptR\n", __func__, tm); return 0; } static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm) { - struct i2c_client *client = to_i2c_client(dev); - struct isl12022 *isl12022 = i2c_get_clientdata(client); - size_t i; + struct isl12022 *isl12022 = dev_get_drvdata(dev); + struct regmap *regmap = isl12022->regmap; int ret; uint8_t buf[ISL12022_REG_DW + 1]; - dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " - "mday=%d, mon=%d, year=%d, wday=%d\n", - __func__, - tm->tm_sec, tm->tm_min, tm->tm_hour, - tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); - - if (!isl12022->write_enabled) { - - ret = isl12022_read_regs(client, ISL12022_REG_INT, buf, 1); - if (ret) - return ret; - - /* Check if WRTC (write rtc enable) is set factory default is - * 0 (not set) */ - if (!(buf[0] & ISL12022_INT_WRTC)) { - dev_info(&client->dev, - "init write enable and 24 hour format\n"); - - /* Set the write enable bit. */ - ret = isl12022_write_reg(client, - ISL12022_REG_INT, - buf[0] | ISL12022_INT_WRTC); - if (ret) - return ret; - - /* Write to any RTC register to start RTC, we use the - * HR register, setting the MIL bit to use the 24 hour - * format. */ - ret = isl12022_read_regs(client, ISL12022_REG_HR, - buf, 1); - if (ret) - return ret; - - ret = isl12022_write_reg(client, - ISL12022_REG_HR, - buf[0] | ISL12022_HR_MIL); - if (ret) - return ret; - } - - isl12022->write_enabled = true; - } + dev_dbg(dev, "%s: %ptR\n", __func__, tm); + + /* Ensure the write enable bit is set. 
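The isl12022 conversion through this hunk replaces the driver's hand-rolled i2c_transfer()/i2c_master_send() helpers with regmap, and the dev_dbg() calls switch to the %ptR printk extension, which formats a struct rtc_time in a single specifier. A minimal sketch of the same bulk-read path against a hypothetical 8-bit-register I2C chip:

#include <linux/regmap.h>

static int example_read_clock(struct device *dev)
{
	/* Retrieves the regmap registered for this device, e.g. by
	 * devm_regmap_init_i2c() in probe.
	 */
	struct regmap *map = dev_get_regmap(dev, NULL);
	u8 buf[7];
	int ret;

	/* One bulk read replaces the old two-message i2c_transfer()
	 * helper: regmap writes the register pointer and reads the
	 * block internally.
	 */
	ret = regmap_bulk_read(map, 0x00, buf, sizeof(buf));
	if (ret)
		return ret;

	dev_dbg(dev, "raw sec=%02x min=%02x\n", buf[0], buf[1]);
	return 0;
}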
*/ + ret = regmap_update_bits(regmap, ISL12022_REG_INT, + ISL12022_INT_WRTC, ISL12022_INT_WRTC); + if (ret) + return ret; /* hours, minutes and seconds */ buf[ISL12022_REG_SC] = bin2bcd(tm->tm_sec); @@ -216,15 +126,8 @@ static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm) buf[ISL12022_REG_DW] = tm->tm_wday & 0x07; - /* write register's data */ - for (i = 0; i < ARRAY_SIZE(buf); i++) { - ret = isl12022_write_reg(client, ISL12022_REG_SC + i, - buf[ISL12022_REG_SC + i]); - if (ret) - return -EIO; - } - - return 0; + return regmap_bulk_write(isl12022->regmap, ISL12022_REG_SC, + buf, sizeof(buf)); } static const struct rtc_class_ops isl12022_rtc_ops = { @@ -232,6 +135,12 @@ static const struct rtc_class_ops isl12022_rtc_ops = { .set_time = isl12022_rtc_set_time, }; +static const struct regmap_config regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .use_single_write = true, +}; + static int isl12022_probe(struct i2c_client *client) { struct isl12022 *isl12022; @@ -243,13 +152,23 @@ static int isl12022_probe(struct i2c_client *client) GFP_KERNEL); if (!isl12022) return -ENOMEM; + dev_set_drvdata(&client->dev, isl12022); + + isl12022->regmap = devm_regmap_init_i2c(client, ®map_config); + if (IS_ERR(isl12022->regmap)) { + dev_err(&client->dev, "regmap allocation failed\n"); + return PTR_ERR(isl12022->regmap); + } + + isl12022->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(isl12022->rtc)) + return PTR_ERR(isl12022->rtc); - i2c_set_clientdata(client, isl12022); + isl12022->rtc->ops = &isl12022_rtc_ops; + isl12022->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + isl12022->rtc->range_max = RTC_TIMESTAMP_END_2099; - isl12022->rtc = devm_rtc_device_register(&client->dev, - isl12022_driver.driver.name, - &isl12022_rtc_ops, THIS_MODULE); - return PTR_ERR_OR_ZERO(isl12022->rtc); + return devm_rtc_register_device(isl12022->rtc); } #ifdef CONFIG_OF diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c index 6e51df72fd65..c383719292c7 100644 --- a/drivers/rtc/rtc-jz4740.c +++ b/drivers/rtc/rtc-jz4740.c @@ -257,11 +257,6 @@ static void jz4740_rtc_power_off(void) kernel_halt(); } -static void jz4740_rtc_clk_disable(void *data) -{ - clk_disable_unprepare(data); -} - static const struct of_device_id jz4740_rtc_of_match[] = { { .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 }, { .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 }, @@ -329,23 +324,9 @@ static int jz4740_rtc_probe(struct platform_device *pdev) if (IS_ERR(rtc->base)) return PTR_ERR(rtc->base); - clk = devm_clk_get(dev, "rtc"); - if (IS_ERR(clk)) { - dev_err(dev, "Failed to get RTC clock\n"); - return PTR_ERR(clk); - } - - ret = clk_prepare_enable(clk); - if (ret) { - dev_err(dev, "Failed to enable clock\n"); - return ret; - } - - ret = devm_add_action_or_reset(dev, jz4740_rtc_clk_disable, clk); - if (ret) { - dev_err(dev, "Failed to register devm action\n"); - return ret; - } + clk = devm_clk_get_enabled(dev, "rtc"); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "Failed to get RTC clock\n"); spin_lock_init(&rtc->lock); diff --git a/drivers/rtc/rtc-mpfs.c b/drivers/rtc/rtc-mpfs.c index f14d1925e0c9..2a479d44f198 100644 --- a/drivers/rtc/rtc-mpfs.c +++ b/drivers/rtc/rtc-mpfs.c @@ -193,23 +193,6 @@ static int mpfs_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) return 0; } -static inline struct clk *mpfs_rtc_init_clk(struct device *dev) -{ - struct clk *clk; - int ret; - - clk = devm_clk_get(dev, "rtc"); - if (IS_ERR(clk)) - return clk; - - ret = 
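The regmap_update_bits() call just above also deserves a note: it performs the read-modify-write that previously took separate helper calls, touching only the bits in the mask (WRTC here) and skipping the bus write entirely when the value already matches. Further down, rtc-jz4740 (and rtc-mpfs, rtc-mxc, and the ti-k3 clocks later in the series) drop the devm_clk_get() + clk_prepare_enable() + cleanup-action triple for devm_clk_get_enabled(). A sketch of that pattern, assuming a consumer clock named "rtc":

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Gets the clock, prepares and enables it, and registers the
	 * disable/unprepare cleanup in one call, so no custom devm
	 * action and no manual unwinding on later errors.
	 */
	clk = devm_clk_get_enabled(&pdev->dev, "rtc");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Failed to get RTC clock\n");

	return 0;
}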
clk_prepare_enable(clk); - if (ret) - return ERR_PTR(ret); - - devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk); - return clk; -} - static irqreturn_t mpfs_rtc_wakeup_irq_handler(int irq, void *dev) { struct mpfs_rtc_dev *rtcdev = dev; @@ -233,7 +216,7 @@ static int mpfs_rtc_probe(struct platform_device *pdev) { struct mpfs_rtc_dev *rtcdev; struct clk *clk; - u32 prescaler; + unsigned long prescaler; int wakeup_irq, ret; rtcdev = devm_kzalloc(&pdev->dev, sizeof(struct mpfs_rtc_dev), GFP_KERNEL); @@ -251,7 +234,7 @@ static int mpfs_rtc_probe(struct platform_device *pdev) /* range is capped by alarm max, lower reg is 31:0 & upper is 10:0 */ rtcdev->rtc->range_max = GENMASK_ULL(42, 0); - clk = mpfs_rtc_init_clk(&pdev->dev); + clk = devm_clk_get_enabled(&pdev->dev, "rtc"); if (IS_ERR(clk)) return PTR_ERR(clk); @@ -275,14 +258,13 @@ static int mpfs_rtc_probe(struct platform_device *pdev) /* prescaler hardware adds 1 to reg value */ prescaler = clk_get_rate(devm_clk_get(&pdev->dev, "rtcref")) - 1; - if (prescaler > MAX_PRESCALER_COUNT) { - dev_dbg(&pdev->dev, "invalid prescaler %d\n", prescaler); + dev_dbg(&pdev->dev, "invalid prescaler %lu\n", prescaler); return -EINVAL; } writel(prescaler, rtcdev->base + PRESCALER_REG); - dev_info(&pdev->dev, "prescaler set to: 0x%X \r\n", prescaler); + dev_info(&pdev->dev, "prescaler set to: %lu\n", prescaler); device_init_wakeup(&pdev->dev, true); ret = dev_pm_set_wake_irq(&pdev->dev, wakeup_irq); diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 53d4e253e81f..762cf03345f1 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c @@ -291,14 +291,6 @@ static const struct rtc_class_ops mxc_rtc_ops = { .alarm_irq_enable = mxc_rtc_alarm_irq_enable, }; -static void mxc_rtc_action(void *p) -{ - struct rtc_plat_data *pdata = p; - - clk_disable_unprepare(pdata->clk_ref); - clk_disable_unprepare(pdata->clk_ipg); -} - static int mxc_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; @@ -341,33 +333,18 @@ static int mxc_rtc_probe(struct platform_device *pdev) rtc->range_max = (1 << 16) * 86400ULL - 1; } - pdata->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + pdata->clk_ipg = devm_clk_get_enabled(&pdev->dev, "ipg"); if (IS_ERR(pdata->clk_ipg)) { dev_err(&pdev->dev, "unable to get ipg clock!\n"); return PTR_ERR(pdata->clk_ipg); } - ret = clk_prepare_enable(pdata->clk_ipg); - if (ret) - return ret; - - pdata->clk_ref = devm_clk_get(&pdev->dev, "ref"); + pdata->clk_ref = devm_clk_get_enabled(&pdev->dev, "ref"); if (IS_ERR(pdata->clk_ref)) { - clk_disable_unprepare(pdata->clk_ipg); dev_err(&pdev->dev, "unable to get ref clock!\n"); return PTR_ERR(pdata->clk_ref); } - ret = clk_prepare_enable(pdata->clk_ref); - if (ret) { - clk_disable_unprepare(pdata->clk_ipg); - return ret; - } - - ret = devm_add_action_or_reset(&pdev->dev, mxc_rtc_action, pdata); - if (ret) - return ret; - rate = clk_get_rate(pdata->clk_ref); if (rate == 32768) diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c index cdc623b3e365..dd170e3efd83 100644 --- a/drivers/rtc/rtc-rv3028.c +++ b/drivers/rtc/rtc-rv3028.c @@ -521,10 +521,9 @@ static int rv3028_param_get(struct device *dev, struct rtc_param *param) { struct rv3028_data *rv3028 = dev_get_drvdata(dev); int ret; + u32 value; switch(param->param) { - u32 value; - case RTC_PARAM_BACKUP_SWITCH_MODE: ret = regmap_read(rv3028->regmap, RV3028_BACKUP, &value); if (ret < 0) @@ -554,9 +553,9 @@ static int rv3028_param_get(struct device *dev, struct rtc_param *param) static int 
rv3028_param_set(struct device *dev, struct rtc_param *param) { struct rv3028_data *rv3028 = dev_get_drvdata(dev); + u8 mode; switch(param->param) { - u8 mode; case RTC_PARAM_BACKUP_SWITCH_MODE: switch (param->uvalue) { case RTC_BSM_DISABLED: diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index 40c0f7ed36e0..aae40d20d086 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c @@ -107,6 +107,8 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev) wdt_pdev->dev.parent = &rtc_pdev->dev; wdt_pdev->dev.platform_data = &wdt_pdata; rc = platform_device_add(wdt_pdev); + if (rc) + platform_device_put(wdt_pdev); } if (rc) diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c index 7a0f181d3fef..ba23163cc042 100644 --- a/drivers/rtc/rtc-ti-k3.c +++ b/drivers/rtc/rtc-ti-k3.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/sys_soc.h> #include <linux/property.h> #include <linux/regmap.h> #include <linux/rtc.h> @@ -45,14 +46,6 @@ #define K3RTC_MIN_OFFSET (-277761) #define K3RTC_MAX_OFFSET (277778) -/** - * struct ti_k3_rtc_soc_data - Private of compatible data for ti-k3-rtc - * @unlock_irq_erratum: Has erratum for unlock infinite IRQs (erratum i2327) - */ -struct ti_k3_rtc_soc_data { - const bool unlock_irq_erratum; -}; - static const struct regmap_config ti_k3_rtc_regmap_config = { .name = "peripheral-registers", .reg_bits = 32, @@ -118,7 +111,6 @@ static const struct reg_field ti_rtc_reg_fields[] = { * @rtc_dev: rtc device * @regmap: rtc mmio regmap * @r_fields: rtc register fields - * @soc: SoC compatible match data */ struct ti_k3_rtc { unsigned int irq; @@ -127,7 +119,6 @@ struct ti_k3_rtc { struct rtc_device *rtc_dev; struct regmap *regmap; struct regmap_field *r_fields[K3_RTC_MAX_FIELDS]; - const struct ti_k3_rtc_soc_data *soc; }; static int k3rtc_field_read(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f) @@ -190,11 +181,22 @@ static int k3rtc_unlock_rtc(struct ti_k3_rtc *priv) /* Skip fence since we are going to check the unlock bit as fence */ ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_UNLOCK], ret, - !ret, 2, priv->sync_timeout_us); + ret, 2, priv->sync_timeout_us); return ret; } +/* + * This is the list of SoCs affected by TI's i2327 errata causing the RTC + * state-machine to break if not unlocked fast enough during boot. These + * SoCs must have the bootloader unlock this device very early in the + * boot-flow before we (Linux) can use this device. 
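The rv3028 changes around this hunk hoist local declarations out of the switch body. A declaration placed between 'switch (...) {' and the first case label is legal C, but control jumps straight past it, so any initializer there never executes and gcc flags the region with -Wswitch-unreachable. A reduced sketch of the hazard and the fix:

#include <linux/errno.h>

/* Hazardous shape: the initializer is jumped over by every label,
 * so 'value' can reach the return uninitialized.
 */
int get_param_bad(int param)
{
	switch (param) {
		int value = -EINVAL;	/* never executes */
	case 0:
		value = 42;
		fallthrough;
	default:
		return value;		/* garbage unless param == 0 */
	}
}

/* Fixed shape, as in the rv3028 hunks: declare at function scope. */
int get_param_good(int param)
{
	int value = -EINVAL;

	switch (param) {
	case 0:
		value = 42;
		break;
	default:
		break;
	}
	return value;
}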
+ */ +static const struct soc_device_attribute has_erratum_i2327[] = { + { .family = "AM62X", .revision = "SR1.0" }, + { /* sentinel */ } +}; + static int k3rtc_configure(struct device *dev) { int ret; @@ -208,7 +210,7 @@ static int k3rtc_configure(struct device *dev) * * In such occurrence, it is assumed that the RTC module is unusable */ - if (priv->soc->unlock_irq_erratum) { + if (soc_device_match(has_erratum_i2327)) { ret = k3rtc_check_unlocked(priv); /* If there is an error OR if we are locked, return error */ if (ret) { @@ -513,21 +515,12 @@ static struct nvmem_config ti_k3_rtc_nvmem_config = { static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv) { - int ret; struct clk *clk; - clk = devm_clk_get(dev, "osc32k"); + clk = devm_clk_get_enabled(dev, "osc32k"); if (IS_ERR(clk)) return PTR_ERR(clk); - ret = clk_prepare_enable(clk); - if (ret) - return ret; - - ret = devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk); - if (ret) - return ret; - priv->rate_32k = clk_get_rate(clk); /* Make sure we are exact 32k clock. Else, try to compensate delay */ @@ -542,24 +535,19 @@ static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv) */ priv->sync_timeout_us = (u32)(DIV_ROUND_UP_ULL(1000000, priv->rate_32k) * 4); - return ret; + return 0; } static int k3rtc_get_vbusclk(struct device *dev, struct ti_k3_rtc *priv) { - int ret; struct clk *clk; /* Note: VBUS isn't a context clock, it is needed for hardware operation */ - clk = devm_clk_get(dev, "vbus"); + clk = devm_clk_get_enabled(dev, "vbus"); if (IS_ERR(clk)) return PTR_ERR(clk); - ret = clk_prepare_enable(clk); - if (ret) - return ret; - - return devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk); + return 0; } static int ti_k3_rtc_probe(struct platform_device *pdev) @@ -602,8 +590,6 @@ static int ti_k3_rtc_probe(struct platform_device *pdev) if (IS_ERR(priv->rtc_dev)) return PTR_ERR(priv->rtc_dev); - priv->soc = of_device_get_match_data(dev); - priv->rtc_dev->ops = &ti_k3_rtc_ops; priv->rtc_dev->range_max = (1ULL << 48) - 1; /* 48Bit seconds */ ti_k3_rtc_nvmem_config.priv = priv; @@ -635,12 +621,8 @@ static int ti_k3_rtc_probe(struct platform_device *pdev) return devm_rtc_nvmem_register(priv->rtc_dev, &ti_k3_rtc_nvmem_config); } -static const struct ti_k3_rtc_soc_data ti_k3_am62_data = { - .unlock_irq_erratum = true, -}; - static const struct of_device_id ti_k3_rtc_of_match_table[] = { - {.compatible = "ti,am62-rtc", .data = &ti_k3_am62_data}, + {.compatible = "ti,am62-rtc" }, {} }; MODULE_DEVICE_TABLE(of, ti_k3_rtc_of_match_table); diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 68f49e2e964c..131293f7f152 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -15,12 +15,14 @@ #include <linux/cdev.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/kobject.h> #include <linux/uaccess.h> #include <asm/cio.h> #include <asm/ccwdev.h> #include <asm/debug.h> #include <asm/diag.h> +#include <asm/scsw.h> #include "vmur.h" @@ -78,6 +80,8 @@ static struct ccw_driver ur_driver = { static DEFINE_MUTEX(vmur_mutex); +static void ur_uevent(struct work_struct *ws); + /* * Allocation, freeing, getting and putting of urdev structures * @@ -108,6 +112,7 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev) ccw_device_get_id(cdev, &urd->dev_id); mutex_init(&urd->io_mutex); init_waitqueue_head(&urd->wait); + INIT_WORK(&urd->uevent_work, ur_uevent); spin_lock_init(&urd->open_lock); refcount_set(&urd->ref_count, 1); 
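The ti-k3 rework in this hunk swaps per-compatible match data for a soc_device_attribute table, so the i2327 unlock quirk keys off the SoC family and revision rather than a DT compatible string, and the ti_k3_rtc_soc_data plumbing disappears. A reduced sketch of the soc_device_match() idiom using the same entries:

#include <linux/sys_soc.h>

static const struct soc_device_attribute has_erratum_i2327[] = {
	{ .family = "AM62X", .revision = "SR1.0" },
	{ /* sentinel */ }
};

static bool example_needs_bootloader_unlock(void)
{
	/* soc_device_match() returns the first matching entry or NULL,
	 * so the quirk engages only on affected silicon revisions.
	 */
	return soc_device_match(has_erratum_i2327) != NULL;
}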
urd->cdev = cdev; @@ -275,6 +280,18 @@ out: return rc; } +static void ur_uevent(struct work_struct *ws) +{ + struct urdev *urd = container_of(ws, struct urdev, uevent_work); + char *envp[] = { + "EVENT=unsol_de", /* Unsolicited device-end interrupt */ + NULL + }; + + kobject_uevent_env(&urd->cdev->dev.kobj, KOBJ_CHANGE, envp); + urdev_put(urd); +} + /* * ur interrupt handler, called from the ccw_device layer */ @@ -288,12 +305,21 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, irb->scsw.cmd.count); } + urd = dev_get_drvdata(&cdev->dev); if (!intparm) { TRACE("ur_int_handler: unsolicited interrupt\n"); + + if (scsw_dstat(&irb->scsw) & DEV_STAT_DEV_END) { + /* + * Userspace might be interested in a transition to + * device-ready state. + */ + urdev_get(urd); + schedule_work(&urd->uevent_work); + } + return; } - urd = dev_get_drvdata(&cdev->dev); - BUG_ON(!urd); /* On special conditions irb is an error pointer */ if (IS_ERR(irb)) urd->io_request_rc = PTR_ERR(irb); @@ -809,7 +835,6 @@ static int ur_probe(struct ccw_device *cdev) rc = -ENOMEM; goto fail_urdev_put; } - cdev->handler = ur_int_handler; /* validate virtual unit record device */ urd->class = get_urd_class(urd); @@ -823,6 +848,7 @@ static int ur_probe(struct ccw_device *cdev) } spin_lock_irq(get_ccwdev_lock(cdev)); dev_set_drvdata(&cdev->dev, urd); + cdev->handler = ur_int_handler; spin_unlock_irq(get_ccwdev_lock(cdev)); mutex_unlock(&vmur_mutex); @@ -928,6 +954,10 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force) rc = -EBUSY; goto fail_urdev_put; } + if (cancel_work_sync(&urd->uevent_work)) { + /* Work not run yet - need to release reference here */ + urdev_put(urd); + } device_destroy(vmur_class, urd->char_device->dev); cdev_del(urd->char_device); urd->char_device = NULL; @@ -963,6 +993,7 @@ static void ur_remove(struct ccw_device *cdev) spin_lock_irqsave(get_ccwdev_lock(cdev), flags); urdev_put(dev_get_drvdata(&cdev->dev)); dev_set_drvdata(&cdev->dev, NULL); + cdev->handler = NULL; spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); mutex_unlock(&vmur_mutex); diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h index 608b0719ce17..92d17d7cb47b 100644 --- a/drivers/s390/char/vmur.h +++ b/drivers/s390/char/vmur.h @@ -13,6 +13,7 @@ #define _VMUR_H_ #include <linux/refcount.h> +#include <linux/workqueue.h> #define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */ #define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */ @@ -76,6 +77,7 @@ struct urdev { wait_queue_head_t wait; /* wait queue to serialize open */ int open_flag; /* "urdev is open" flag */ spinlock_t open_lock; /* serialize critical sections */ + struct work_struct uevent_work; /* work to send uevent */ }; /* diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig index 58cf8c40d08d..ed4c571f8771 100644 --- a/drivers/soc/sifive/Kconfig +++ b/drivers/soc/sifive/Kconfig @@ -2,9 +2,9 @@ if SOC_SIFIVE -config SIFIVE_L2 - bool "Sifive L2 Cache controller" +config SIFIVE_CCACHE + bool "Sifive Composable Cache controller" help - Support for the L2 cache controller on SiFive platforms. + Support for the composable cache controller on SiFive platforms. 
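The vmur change in this region delivers a KOBJ_CHANGE uevent for unsolicited device-end interrupts, and does it safely: the interrupt handler only pins the object and schedules a work item, the work function sends the (possibly sleeping) uevent and drops the reference, and the offline path's cancel_work_sync() releases the reference when the work never ran. A condensed sketch of that ownership pattern; example_get()/example_put() stand in for urdev_get()/urdev_put()-style refcount helpers:

#include <linux/kobject.h>
#include <linux/workqueue.h>

struct example_dev {
	struct device *dev;
	struct work_struct uevent_work;
};

static void example_uevent(struct work_struct *ws)
{
	struct example_dev *ed =
		container_of(ws, struct example_dev, uevent_work);
	char *envp[] = { "EVENT=unsol_de", NULL };

	/* Process context: kobject_uevent_env() may sleep, which is
	 * why the interrupt handler defers to this work item.
	 */
	kobject_uevent_env(&ed->dev->kobj, KOBJ_CHANGE, envp);
	example_put(ed);	/* ref taken before schedule_work() */
}

/* Called from the interrupt handler. */
static void example_notify(struct example_dev *ed)
{
	example_get(ed);
	schedule_work(&ed->uevent_work);
}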
endif diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile index b5caff77938f..1f5dc339bf82 100644 --- a/drivers/soc/sifive/Makefile +++ b/drivers/soc/sifive/Makefile @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_SIFIVE_L2) += sifive_l2_cache.o +obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o diff --git a/drivers/soc/sifive/sifive_ccache.c b/drivers/soc/sifive/sifive_ccache.c new file mode 100644 index 000000000000..1c171150e878 --- /dev/null +++ b/drivers/soc/sifive/sifive_ccache.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SiFive composable cache controller Driver + * + * Copyright (C) 2018-2022 SiFive, Inc. + * + */ + +#define pr_fmt(fmt) "CCACHE: " fmt + +#include <linux/debugfs.h> +#include <linux/interrupt.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/device.h> +#include <linux/bitfield.h> +#include <asm/cacheinfo.h> +#include <soc/sifive/sifive_ccache.h> + +#define SIFIVE_CCACHE_DIRECCFIX_LOW 0x100 +#define SIFIVE_CCACHE_DIRECCFIX_HIGH 0x104 +#define SIFIVE_CCACHE_DIRECCFIX_COUNT 0x108 + +#define SIFIVE_CCACHE_DIRECCFAIL_LOW 0x120 +#define SIFIVE_CCACHE_DIRECCFAIL_HIGH 0x124 +#define SIFIVE_CCACHE_DIRECCFAIL_COUNT 0x128 + +#define SIFIVE_CCACHE_DATECCFIX_LOW 0x140 +#define SIFIVE_CCACHE_DATECCFIX_HIGH 0x144 +#define SIFIVE_CCACHE_DATECCFIX_COUNT 0x148 + +#define SIFIVE_CCACHE_DATECCFAIL_LOW 0x160 +#define SIFIVE_CCACHE_DATECCFAIL_HIGH 0x164 +#define SIFIVE_CCACHE_DATECCFAIL_COUNT 0x168 + +#define SIFIVE_CCACHE_CONFIG 0x00 +#define SIFIVE_CCACHE_CONFIG_BANK_MASK GENMASK_ULL(7, 0) +#define SIFIVE_CCACHE_CONFIG_WAYS_MASK GENMASK_ULL(15, 8) +#define SIFIVE_CCACHE_CONFIG_SETS_MASK GENMASK_ULL(23, 16) +#define SIFIVE_CCACHE_CONFIG_BLKS_MASK GENMASK_ULL(31, 24) + +#define SIFIVE_CCACHE_WAYENABLE 0x08 +#define SIFIVE_CCACHE_ECCINJECTERR 0x40 + +#define SIFIVE_CCACHE_MAX_ECCINTR 4 + +static void __iomem *ccache_base; +static int g_irq[SIFIVE_CCACHE_MAX_ECCINTR]; +static struct riscv_cacheinfo_ops ccache_cache_ops; +static int level; + +enum { + DIR_CORR = 0, + DATA_CORR, + DATA_UNCORR, + DIR_UNCORR, +}; + +#ifdef CONFIG_DEBUG_FS +static struct dentry *sifive_test; + +static ssize_t ccache_write(struct file *file, const char __user *data, + size_t count, loff_t *ppos) +{ + unsigned int val; + + if (kstrtouint_from_user(data, count, 0, &val)) + return -EINVAL; + if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF)) + writel(val, ccache_base + SIFIVE_CCACHE_ECCINJECTERR); + else + return -EINVAL; + return count; +} + +static const struct file_operations ccache_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = ccache_write +}; + +static void setup_sifive_debug(void) +{ + sifive_test = debugfs_create_dir("sifive_ccache_cache", NULL); + + debugfs_create_file("sifive_debug_inject_error", 0200, + sifive_test, NULL, &ccache_fops); +} +#endif + +static void ccache_config_read(void) +{ + u32 cfg; + + cfg = readl(ccache_base + SIFIVE_CCACHE_CONFIG); + pr_info("%llu banks, %llu ways, sets/bank=%llu, bytes/block=%llu\n", + FIELD_GET(SIFIVE_CCACHE_CONFIG_BANK_MASK, cfg), + FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg), + BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_SETS_MASK, cfg)), + BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_BLKS_MASK, cfg))); + + cfg = readl(ccache_base + SIFIVE_CCACHE_WAYENABLE); + pr_info("Index of the largest way enabled: %u\n", cfg); +} + +static const struct of_device_id sifive_ccache_ids[] = { + { .compatible = "sifive,fu540-c000-ccache" }, + { .compatible = "sifive,fu740-c000-ccache" 
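ccache_config_read() above decodes the packed CONFIG register with GENMASK_ULL()/FIELD_GET() rather than hand-written shifts, and converts the log2-encoded sets and block-size fields to counts with BIT_ULL(). A standalone sketch of the same decode:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>

#define EXAMPLE_CFG_BANK_MASK	GENMASK_ULL(7, 0)
#define EXAMPLE_CFG_SETS_MASK	GENMASK_ULL(23, 16)

static void example_decode(u32 cfg)
{
	/* FIELD_GET() masks and shifts in one step; the sets field is
	 * stored as a log2, so BIT_ULL() recovers the real count.
	 */
	u64 banks = FIELD_GET(EXAMPLE_CFG_BANK_MASK, cfg);
	u64 sets = BIT_ULL(FIELD_GET(EXAMPLE_CFG_SETS_MASK, cfg));

	pr_info("%llu banks, sets/bank=%llu\n", banks, sets);
}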
}, + { .compatible = "sifive,ccache0" }, + { /* end of table */ } +}; + +static ATOMIC_NOTIFIER_HEAD(ccache_err_chain); + +int register_sifive_ccache_error_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&ccache_err_chain, nb); +} +EXPORT_SYMBOL_GPL(register_sifive_ccache_error_notifier); + +int unregister_sifive_ccache_error_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&ccache_err_chain, nb); +} +EXPORT_SYMBOL_GPL(unregister_sifive_ccache_error_notifier); + +static int ccache_largest_wayenabled(void) +{ + return readl(ccache_base + SIFIVE_CCACHE_WAYENABLE) & 0xFF; +} + +static ssize_t number_of_ways_enabled_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", ccache_largest_wayenabled()); +} + +static DEVICE_ATTR_RO(number_of_ways_enabled); + +static struct attribute *priv_attrs[] = { + &dev_attr_number_of_ways_enabled.attr, + NULL, +}; + +static const struct attribute_group priv_attr_group = { + .attrs = priv_attrs, +}; + +static const struct attribute_group *ccache_get_priv_group(struct cacheinfo + *this_leaf) +{ + /* We want to use private group for composable cache only */ + if (this_leaf->level == level) + return &priv_attr_group; + else + return NULL; +} + +static irqreturn_t ccache_int_handler(int irq, void *device) +{ + unsigned int add_h, add_l; + + if (irq == g_irq[DIR_CORR]) { + add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_HIGH); + add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_LOW); + pr_err("DirError @ 0x%08X.%08X\n", add_h, add_l); + /* Reading this register clears the DirError interrupt sig */ + readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_COUNT); + atomic_notifier_call_chain(&ccache_err_chain, + SIFIVE_CCACHE_ERR_TYPE_CE, + "DirECCFix"); + } + if (irq == g_irq[DIR_UNCORR]) { + add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_HIGH); + add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_LOW); + /* Reading this register clears the DirFail interrupt sig */ + readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_COUNT); + atomic_notifier_call_chain(&ccache_err_chain, + SIFIVE_CCACHE_ERR_TYPE_UE, + "DirECCFail"); + panic("CCACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l); + } + if (irq == g_irq[DATA_CORR]) { + add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_HIGH); + add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_LOW); + pr_err("DataError @ 0x%08X.%08X\n", add_h, add_l); + /* Reading this register clears the DataError interrupt sig */ + readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_COUNT); + atomic_notifier_call_chain(&ccache_err_chain, + SIFIVE_CCACHE_ERR_TYPE_CE, + "DatECCFix"); + } + if (irq == g_irq[DATA_UNCORR]) { + add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_HIGH); + add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_LOW); + pr_err("DataFail @ 0x%08X.%08X\n", add_h, add_l); + /* Reading this register clears the DataFail interrupt sig */ + readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_COUNT); + atomic_notifier_call_chain(&ccache_err_chain, + SIFIVE_CCACHE_ERR_TYPE_UE, + "DatECCFail"); + } + + return IRQ_HANDLED; +} + +static int __init sifive_ccache_init(void) +{ + struct device_node *np; + struct resource res; + int i, rc, intr_num; + + np = of_find_matching_node(NULL, sifive_ccache_ids); + if (!np) + return -ENODEV; + + if (of_address_to_resource(np, 0, &res)) + return -ENODEV; + + ccache_base = ioremap(res.start, resource_size(&res)); + if (!ccache_base) + return -ENOMEM; + + if (of_property_read_u32(np, "cache-level", &level)) + 
return -ENOENT; + + intr_num = of_property_count_u32_elems(np, "interrupts"); + if (!intr_num) { + pr_err("No interrupts property\n"); + return -ENODEV; + } + + for (i = 0; i < intr_num; i++) { + g_irq[i] = irq_of_parse_and_map(np, i); + rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc", + NULL); + if (rc) { + pr_err("Could not request IRQ %d\n", g_irq[i]); + return rc; + } + } + + ccache_config_read(); + + ccache_cache_ops.get_priv_group = ccache_get_priv_group; + riscv_set_cacheinfo_ops(&ccache_cache_ops); + +#ifdef CONFIG_DEBUG_FS + setup_sifive_debug(); +#endif + return 0; +} + +device_initcall(sifive_ccache_init); diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c deleted file mode 100644 index 59640a1d0b28..000000000000 --- a/drivers/soc/sifive/sifive_l2_cache.c +++ /dev/null @@ -1,237 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * SiFive L2 cache controller Driver - * - * Copyright (C) 2018-2019 SiFive, Inc. - * - */ -#include <linux/debugfs.h> -#include <linux/interrupt.h> -#include <linux/of_irq.h> -#include <linux/of_address.h> -#include <linux/device.h> -#include <asm/cacheinfo.h> -#include <soc/sifive/sifive_l2_cache.h> - -#define SIFIVE_L2_DIRECCFIX_LOW 0x100 -#define SIFIVE_L2_DIRECCFIX_HIGH 0x104 -#define SIFIVE_L2_DIRECCFIX_COUNT 0x108 - -#define SIFIVE_L2_DIRECCFAIL_LOW 0x120 -#define SIFIVE_L2_DIRECCFAIL_HIGH 0x124 -#define SIFIVE_L2_DIRECCFAIL_COUNT 0x128 - -#define SIFIVE_L2_DATECCFIX_LOW 0x140 -#define SIFIVE_L2_DATECCFIX_HIGH 0x144 -#define SIFIVE_L2_DATECCFIX_COUNT 0x148 - -#define SIFIVE_L2_DATECCFAIL_LOW 0x160 -#define SIFIVE_L2_DATECCFAIL_HIGH 0x164 -#define SIFIVE_L2_DATECCFAIL_COUNT 0x168 - -#define SIFIVE_L2_CONFIG 0x00 -#define SIFIVE_L2_WAYENABLE 0x08 -#define SIFIVE_L2_ECCINJECTERR 0x40 - -#define SIFIVE_L2_MAX_ECCINTR 4 - -static void __iomem *l2_base; -static int g_irq[SIFIVE_L2_MAX_ECCINTR]; -static struct riscv_cacheinfo_ops l2_cache_ops; - -enum { - DIR_CORR = 0, - DATA_CORR, - DATA_UNCORR, - DIR_UNCORR, -}; - -#ifdef CONFIG_DEBUG_FS -static struct dentry *sifive_test; - -static ssize_t l2_write(struct file *file, const char __user *data, - size_t count, loff_t *ppos) -{ - unsigned int val; - - if (kstrtouint_from_user(data, count, 0, &val)) - return -EINVAL; - if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF)) - writel(val, l2_base + SIFIVE_L2_ECCINJECTERR); - else - return -EINVAL; - return count; -} - -static const struct file_operations l2_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .write = l2_write -}; - -static void setup_sifive_debug(void) -{ - sifive_test = debugfs_create_dir("sifive_l2_cache", NULL); - - debugfs_create_file("sifive_debug_inject_error", 0200, - sifive_test, NULL, &l2_fops); -} -#endif - -static void l2_config_read(void) -{ - u32 regval, val; - - regval = readl(l2_base + SIFIVE_L2_CONFIG); - val = regval & 0xFF; - pr_info("L2CACHE: No. of Banks in the cache: %d\n", val); - val = (regval & 0xFF00) >> 8; - pr_info("L2CACHE: No. 
of ways per bank: %d\n", val); - val = (regval & 0xFF0000) >> 16; - pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val); - val = (regval & 0xFF000000) >> 24; - pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val); - - regval = readl(l2_base + SIFIVE_L2_WAYENABLE); - pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval); -} - -static const struct of_device_id sifive_l2_ids[] = { - { .compatible = "sifive,fu540-c000-ccache" }, - { .compatible = "sifive,fu740-c000-ccache" }, - { /* end of table */ }, -}; - -static ATOMIC_NOTIFIER_HEAD(l2_err_chain); - -int register_sifive_l2_error_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_register(&l2_err_chain, nb); -} -EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier); - -int unregister_sifive_l2_error_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&l2_err_chain, nb); -} -EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier); - -static int l2_largest_wayenabled(void) -{ - return readl(l2_base + SIFIVE_L2_WAYENABLE) & 0xFF; -} - -static ssize_t number_of_ways_enabled_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", l2_largest_wayenabled()); -} - -static DEVICE_ATTR_RO(number_of_ways_enabled); - -static struct attribute *priv_attrs[] = { - &dev_attr_number_of_ways_enabled.attr, - NULL, -}; - -static const struct attribute_group priv_attr_group = { - .attrs = priv_attrs, -}; - -static const struct attribute_group *l2_get_priv_group(struct cacheinfo *this_leaf) -{ - /* We want to use private group for L2 cache only */ - if (this_leaf->level == 2) - return &priv_attr_group; - else - return NULL; -} - -static irqreturn_t l2_int_handler(int irq, void *device) -{ - unsigned int add_h, add_l; - - if (irq == g_irq[DIR_CORR]) { - add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH); - add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW); - pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l); - /* Reading this register clears the DirError interrupt sig */ - readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT); - atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE, - "DirECCFix"); - } - if (irq == g_irq[DIR_UNCORR]) { - add_h = readl(l2_base + SIFIVE_L2_DIRECCFAIL_HIGH); - add_l = readl(l2_base + SIFIVE_L2_DIRECCFAIL_LOW); - /* Reading this register clears the DirFail interrupt sig */ - readl(l2_base + SIFIVE_L2_DIRECCFAIL_COUNT); - atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE, - "DirECCFail"); - panic("L2CACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l); - } - if (irq == g_irq[DATA_CORR]) { - add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH); - add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW); - pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l); - /* Reading this register clears the DataError interrupt sig */ - readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT); - atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE, - "DatECCFix"); - } - if (irq == g_irq[DATA_UNCORR]) { - add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH); - add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW); - pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l); - /* Reading this register clears the DataFail interrupt sig */ - readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT); - atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE, - "DatECCFail"); - } - - return IRQ_HANDLED; -} - -static int __init sifive_l2_init(void) -{ - struct device_node *np; - struct resource res; - int i, rc, intr_num; - - np = 
of_find_matching_node(NULL, sifive_l2_ids); - if (!np) - return -ENODEV; - - if (of_address_to_resource(np, 0, &res)) - return -ENODEV; - - l2_base = ioremap(res.start, resource_size(&res)); - if (!l2_base) - return -ENOMEM; - - intr_num = of_property_count_u32_elems(np, "interrupts"); - if (!intr_num) { - pr_err("L2CACHE: no interrupts property\n"); - return -ENODEV; - } - - for (i = 0; i < intr_num; i++) { - g_irq[i] = irq_of_parse_and_map(np, i); - rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL); - if (rc) { - pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]); - return rc; - } - } - - l2_config_read(); - - l2_cache_ops.get_priv_group = l2_get_priv_group; - riscv_set_cacheinfo_ops(&l2_cache_ops); - -#ifdef CONFIG_DEBUG_FS - setup_sifive_debug(); -#endif - return 0; -} -device_initcall(sifive_l2_init); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 13cdd9def087..434f83168546 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -603,21 +603,6 @@ config SERIAL_MUX_CONSOLE select SERIAL_CORE_CONSOLE default y -config PDC_CONSOLE - bool "PDC software console support" - depends on PARISC && !SERIAL_MUX && VT - help - Saying Y here will enable the software based PDC console to be - used as the system console. This is useful for machines in - which the hardware based console has not been written yet. The - following steps must be completed to use the PDC console: - - 1. create the device entry (mknod /dev/ttyB0 c 11 0) - 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0 - 3. Add device ttyB0 to /etc/securetty (if you want to log on as - root on this console.) - 4. Change the kernel command console parameter to: console=ttyB0 - config SERIAL_SUNSAB tristate "Sun Siemens SAB82532 serial support" depends on SPARC && PCI diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index 38a861e22c33..7753e586e65a 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -1298,7 +1298,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) /* limit fbsize to max visible screen size */ if (fix->smem_len > yres*fix->line_length) - fix->smem_len = yres*fix->line_length; + fix->smem_len = ALIGN(yres*fix->line_length, 4*1024*1024); fix->accel = FB_ACCEL_NONE; |
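A closing note on the stifb change: ALIGN(x, a) rounds x up to the next multiple of a power-of-two a, so the framebuffer clamp now lands on a 4 MiB boundary instead of the exact visible-screen byte count. A tiny worked illustration:

#include <linux/align.h>

/* yres = 1024, line_length = 5120 gives 5,242,880 bytes; rounding
 * up to a 4 MiB (4,194,304-byte) boundary yields 8,388,608.
 */
u32 len = ALIGN(1024 * 5120, 4 * 1024 * 1024);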