Diffstat (limited to 'arch/x86/kernel/cpu/intel_rdt_rdtgroup.c')
 arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 176
 1 file changed, 160 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index b140c68bc14b..f27b8115ffa2 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -268,17 +268,27 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 			      struct seq_file *s, void *v)
 {
 	struct rdtgroup *rdtgrp;
+	struct cpumask *mask;
 	int ret = 0;
 
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 	if (rdtgrp) {
-		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
-			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
-				   cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
-		else
+		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+			if (!rdtgrp->plr->d) {
+				rdt_last_cmd_clear();
+				rdt_last_cmd_puts("Cache domain offline\n");
+				ret = -ENODEV;
+			} else {
+				mask = &rdtgrp->plr->d->cpu_mask;
+				seq_printf(s, is_cpu_list(of) ?
+					   "%*pbl\n" : "%*pb\n",
+					   cpumask_pr_args(mask));
+			}
+		} else {
 			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
 				   cpumask_pr_args(&rdtgrp->cpu_mask));
+		}
 	} else {
 		ret = -ENOENT;
 	}
@@ -961,7 +971,78 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
 }
 
 /**
- * rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
+ * rdt_cdp_peer_get - Retrieve CDP peer if it exists
+ * @r: RDT resource to which RDT domain @d belongs
+ * @d: Cache instance for which a CDP peer is requested
+ * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
+ *         Used to return the result.
+ * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
+ *         Used to return the result.
+ *
+ * RDT resources are managed independently and by extension the RDT domains
+ * (RDT resource instances) are managed independently also. The Code and
+ * Data Prioritization (CDP) RDT resources, while managed independently,
+ * could refer to the same underlying hardware. For example,
+ * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
+ *
+ * When provided with an RDT resource @r and an instance of that RDT
+ * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
+ * resource and the exact instance that shares the same hardware.
+ *
+ * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
+ *         If a CDP peer was found, @r_cdp will point to the peer RDT resource
+ *         and @d_cdp will point to the peer RDT domain.
+ */
+static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
+			    struct rdt_resource **r_cdp,
+			    struct rdt_domain **d_cdp)
+{
+	struct rdt_resource *_r_cdp = NULL;
+	struct rdt_domain *_d_cdp = NULL;
+	int ret = 0;
+
+	switch (r->rid) {
+	case RDT_RESOURCE_L3DATA:
+		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
+		break;
+	case RDT_RESOURCE_L3CODE:
+		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA];
+		break;
+	case RDT_RESOURCE_L2DATA:
+		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE];
+		break;
+	case RDT_RESOURCE_L2CODE:
+		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA];
+		break;
+	default:
+		ret = -ENOENT;
+		goto out;
+	}
+
+	/*
+	 * When a new CPU comes online and CDP is enabled then the new
+	 * RDT domains (if any) associated with both CDP RDT resources
+	 * are added in the same CPU online routine while the
+	 * rdtgroup_mutex is held. It should thus not happen for one
+	 * RDT domain to exist and be associated with its RDT CDP
+	 * resource but there is no RDT domain associated with the
+	 * peer RDT CDP resource. Hence the WARN.
+	 */
+	_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
+	if (WARN_ON(!_d_cdp)) {
+		_r_cdp = NULL;
+		ret = -EINVAL;
+	}
+
+out:
+	*r_cdp = _r_cdp;
+	*d_cdp = _d_cdp;
+
+	return ret;
+}
+
+/**
+ * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
  * @r: Resource to which domain instance @d belongs.
  * @d: The domain instance for which @closid is being tested.
  * @cbm: Capacity bitmask being tested.
@@ -980,8 +1061,8 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
  *
  * Return: false if CBM does not overlap, true if it does.
  */
-bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-			   unsigned long cbm, int closid, bool exclusive)
+static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+				    unsigned long cbm, int closid, bool exclusive)
 {
 	enum rdtgrp_mode mode;
 	unsigned long ctrl_b;
@@ -1017,6 +1098,41 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
 }
 
 /**
+ * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
+ * @r: Resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Resources that can be allocated using a CBM can use the CBM to control
+ * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
+ * for overlap. Overlap test is not limited to the specific resource for
+ * which the CBM is intended though - when dealing with CDP resources that
+ * share the underlying hardware the overlap check should be performed on
+ * the CDP resource sharing the hardware also.
+ *
+ * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
+ * overlap test.
+ *
+ * Return: true if CBM overlap detected, false if there is no overlap
+ */
+bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+			   unsigned long cbm, int closid, bool exclusive)
+{
+	struct rdt_resource *r_cdp;
+	struct rdt_domain *d_cdp;
+
+	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
+		return true;
+
+	if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
+		return false;
+
+	return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
+}
+
+/**
  * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
  *
  * An exclusive resource group implies that there should be no sharing of
@@ -1176,6 +1292,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 	struct rdt_resource *r;
 	struct rdt_domain *d;
 	unsigned int size;
+	int ret = 0;
 	bool sep;
 	u32 ctrl;
 
@@ -1186,11 +1303,18 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 	}
 
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
-		seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
-		size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
-					    rdtgrp->plr->d,
-					    rdtgrp->plr->cbm);
-		seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+		if (!rdtgrp->plr->d) {
+			rdt_last_cmd_clear();
+			rdt_last_cmd_puts("Cache domain offline\n");
+			ret = -ENODEV;
+		} else {
+			seq_printf(s, "%*s:", max_name_width,
+				   rdtgrp->plr->r->name);
+			size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+						    rdtgrp->plr->d,
+						    rdtgrp->plr->cbm);
+			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+		}
 		goto out;
 	}
 
@@ -1220,7 +1344,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 out:
 	rdtgroup_kn_unlock(of->kn);
 
-	return 0;
+	return ret;
 }
 
 /* rdtgroup information files for one cache resource. */
@@ -2354,14 +2478,16 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
  */
 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 {
+	struct rdt_resource *r_cdp = NULL;
+	struct rdt_domain *d_cdp = NULL;
 	u32 used_b = 0, unused_b = 0;
 	u32 closid = rdtgrp->closid;
 	struct rdt_resource *r;
 	unsigned long tmp_cbm;
 	enum rdtgrp_mode mode;
 	struct rdt_domain *d;
+	u32 peer_ctl, *ctrl;
 	int i, ret;
-	u32 *ctrl;
 
 	for_each_alloc_enabled_rdt_resource(r) {
 		/*
@@ -2371,6 +2497,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 		if (r->rid == RDT_RESOURCE_MBA)
 			continue;
 		list_for_each_entry(d, &r->domains, list) {
+			rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
 			d->have_new_ctrl = false;
 			d->new_ctrl = r->cache.shareable_bits;
 			used_b = r->cache.shareable_bits;
@@ -2380,9 +2507,19 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 					mode = rdtgroup_mode_by_closid(i);
 					if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
 						break;
-					used_b |= *ctrl;
+					/*
+					 * If CDP is active include peer
+					 * domain's usage to ensure there
+					 * is no overlap with an exclusive
+					 * group.
+					 */
+					if (d_cdp)
+						peer_ctl = d_cdp->ctrl_val[i];
+					else
+						peer_ctl = 0;
+					used_b |= *ctrl | peer_ctl;
 					if (mode == RDT_MODE_SHAREABLE)
-						d->new_ctrl |= *ctrl;
+						d->new_ctrl |= *ctrl | peer_ctl;
 				}
 			}
 			if (d->plr && d->plr->cbm > 0)
@@ -2805,6 +2942,13 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
 {
 	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
 		seq_puts(seq, ",cdp");
+
+	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
+		seq_puts(seq, ",cdpl2");
+
+	if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
+		seq_puts(seq, ",mba_MBps");
+
 	return 0;
 }
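The pairing at the heart of rdt_cdp_peer_get() above is a fixed mapping between the four CDP resource IDs. Below is a minimal standalone sketch of that mapping, assuming simplified stand-in names (cdp_peer(), enum cdp_rid) rather than the kernel's rdt_resources_all[] indices; it compiles as an ordinary userspace C program:

/*
 * Illustrative, standalone sketch of the resource pairing performed by
 * rdt_cdp_peer_get(). The enum below is a hypothetical stand-in for the
 * kernel's resource IDs, not the kernel definition.
 */
#include <stdio.h>

enum cdp_rid {
	RID_L3DATA,
	RID_L3CODE,
	RID_L2DATA,
	RID_L2CODE,
	RID_OTHER,	/* any non-CDP resource, e.g. MBA */
};

/* Return the peer ID sharing the same cache, or -1 if there is none. */
static int cdp_peer(enum cdp_rid rid)
{
	switch (rid) {
	case RID_L3DATA: return RID_L3CODE;
	case RID_L3CODE: return RID_L3DATA;
	case RID_L2DATA: return RID_L2CODE;
	case RID_L2CODE: return RID_L2DATA;
	default:	 return -1;	/* mirrors the -ENOENT case */
	}
}

int main(void)
{
	printf("peer of L2DATA: %d (expect %d)\n", cdp_peer(RID_L2DATA), RID_L2CODE);
	printf("peer of OTHER:  %d (expect -1)\n", cdp_peer(RID_OTHER));
	return 0;
}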
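The new rdtgroup_cbm_overlaps() wrapper tests the resource itself first and then its CDP peer, because a CODE and a DATA allocation in the same cache instance occupy the same physical capacity. A standalone illustration of that two-step bitmask test follows; all names and values here are hypothetical:

/*
 * Two CBMs overlap when they share any bit. With CDP enabled, a new
 * CBM must be checked against existing allocations on the peer
 * resource as well, since both map to the same cache ways.
 */
#include <stdbool.h>
#include <stdio.h>

static bool cbms_overlap(unsigned long a, unsigned long b)
{
	return (a & b) != 0;
}

int main(void)
{
	unsigned long new_cbm   = 0x0f0;	/* CBM being requested */
	unsigned long own_ctrl  = 0x00f;	/* existing CBM, same resource */
	unsigned long peer_ctrl = 0x3c0;	/* existing CBM, CDP peer */

	/* Check the resource itself first, then its CDP peer. */
	bool overlap = cbms_overlap(new_cbm, own_ctrl) ||
		       cbms_overlap(new_cbm, peer_ctrl);

	printf("overlap: %s\n", overlap ? "true" : "false");	/* true: bits 0x0c0 */
	return 0;
}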
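Similarly, rdtgroup_init_alloc() now ORs the peer domain's control value (peer_ctl) into used_b, so the default CBM chosen for a new group cannot land on capacity an exclusive group already holds through the peer resource. A small sketch of that computation with made-up values (full_mask, shareable, ctrl[] and peer_ctl[] are illustrative stand-ins for the kernel state):

/*
 * Sketch of the used_b/unused_b computation once the CDP peer
 * domain's control values are included. All numbers are invented;
 * full_mask stands for a cbm_len worth of bits.
 */
#include <stdio.h>

int main(void)
{
	unsigned int full_mask = 0xfff;			/* 12-bit CBM, hypothetical */
	unsigned int shareable = 0x003;			/* r->cache.shareable_bits */
	unsigned int ctrl[2]     = { 0x0f0, 0x700 };	/* per-closid CBMs */
	unsigned int peer_ctl[2] = { 0x00c, 0x000 };	/* CDP peer usage */
	unsigned int used_b = shareable, i;

	for (i = 0; i < 2; i++)
		used_b |= ctrl[i] | peer_ctl[i];	/* include peer usage */

	printf("used:   0x%03x\n", used_b);		/* 0x7ff */
	printf("unused: 0x%03x\n", full_mask & ~used_b);/* 0x800 */
	return 0;
}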
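Finally, rdtgroup_show_options() now reports the cdpl2 and mba_MBps mount options alongside cdp. A rough userspace approximation of the resulting output, assuming hypothetical boolean flags in place of the kernel's alloc_enabled and is_mba_sc() state:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flags standing in for kernel state. */
static bool l3data_alloc_enabled = true;	/* CDP enabled on L3 */
static bool l2data_alloc_enabled = true;	/* CDP enabled on L2 */
static bool mba_sc_enabled = false;		/* MBA software controller */

int main(void)
{
	/* Mirrors the order options are emitted in rdtgroup_show_options(). */
	if (l3data_alloc_enabled)
		printf(",cdp");
	if (l2data_alloc_enabled)
		printf(",cdpl2");
	if (mba_sc_enabled)
		printf(",mba_MBps");
	printf("\n");	/* prints ",cdp,cdpl2" with the flags above */
	return 0;
}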