diff options
Diffstat (limited to 'kernel/dma/swiotlb.c')
| -rw-r--r-- | kernel/dma/swiotlb.c | 63 | 
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 339a990554e7..a34c38bbe28f 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -300,6 +300,37 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 	return;
 }
 
+static void *swiotlb_memblock_alloc(unsigned long nslabs, unsigned int flags,
+		int (*remap)(void *tlb, unsigned long nslabs))
+{
+	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
+	void *tlb;
+
+	/*
+	 * By default allocate the bounce buffer memory from low memory, but
+	 * allow to pick a location everywhere for hypervisors with guest
+	 * memory encryption.
+	 */
+	if (flags & SWIOTLB_ANY)
+		tlb = memblock_alloc(bytes, PAGE_SIZE);
+	else
+		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
+
+	if (!tlb) {
+		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
+			__func__, bytes);
+		return NULL;
+	}
+
+	if (remap && remap(tlb, nslabs) < 0) {
+		memblock_free(tlb, PAGE_ALIGN(bytes));
+		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
+		return NULL;
+	}
+
+	return tlb;
+}
+
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -310,7 +341,6 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs;
 	size_t alloc_size;
-	size_t bytes;
 	void *tlb;
 
 	if (!addressing_limit && !swiotlb_force_bounce)
@@ -326,31 +356,16 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 		swiotlb_adjust_nareas(num_possible_cpus());
 
 	nslabs = default_nslabs;
-	/*
-	 * By default allocate the bounce buffer memory from low memory, but
-	 * allow to pick a location everywhere for hypervisors with guest
-	 * memory encryption.
-	 */
-retry:
-	bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
-	if (flags & SWIOTLB_ANY)
-		tlb = memblock_alloc(bytes, PAGE_SIZE);
-	else
-		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
-	if (!tlb) {
-		pr_warn("%s: failed to allocate tlb structure\n", __func__);
-		return;
-	}
-
-	if (remap && remap(tlb, nslabs) < 0) {
-		memblock_free(tlb, PAGE_ALIGN(bytes));
-
+	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
+		if (nslabs <= IO_TLB_MIN_SLABS)
+			return;
 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
-		if (nslabs >= IO_TLB_MIN_SLABS)
-			goto retry;
+	}
 
-		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
-		return;
+	if (default_nslabs != nslabs) {
+		pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
+			default_nslabs, nslabs);
+		default_nslabs = nslabs;
 	}
 
 	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));