Diffstat (limited to 'include/net/page_pool')
-rw-r--r--  include/net/page_pool/helpers.h  39
-rw-r--r--  include/net/page_pool/types.h    23
2 files changed, 28 insertions(+), 34 deletions(-)
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 2b43a893c619..793e6fd78bc5 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -216,7 +216,7 @@ page_pool_get_dma_dir(const struct page_pool *pool)
static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr)
{
- atomic_long_set(&netmem_to_page(netmem)->pp_ref_count, nr);
+ atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr);
}
/**
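The new accessor hides the struct page layout from this header. Below is a minimal sketch of what netmem_get_pp_ref_count_ref() is assumed to look like; the real definition lives elsewhere in this series (net/netmem.h) and must also resolve net_iov-backed netmem, so treat this page-only version as illustrative:

static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
{
	/* Illustrative only: covers the page-backed case; the in-tree
	 * helper is expected to handle net_iov-backed netmem as well.
	 */
	return &netmem_to_page(netmem)->pp_ref_count;
}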
@@ -244,7 +244,7 @@ static inline void page_pool_fragment_page(struct page *page, long nr)
static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
{
- struct page *page = netmem_to_page(netmem);
+ atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem);
long ret;
/* If nr == pp_ref_count then we have cleared all remaining
@@ -261,19 +261,19 @@ static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
* initially, and only overwrite it when the page is partitioned into
* more than one piece.
*/
- if (atomic_long_read(&page->pp_ref_count) == nr) {
+ if (atomic_long_read(pp_ref_count) == nr) {
/* As we have ensured nr is always one for the constant case using
 * the BUILD_BUG_ON(), we only need to handle the non-constant case
 * here for pp_ref_count draining, which is a rare case.
 */
BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
if (!__builtin_constant_p(nr))
- atomic_long_set(&page->pp_ref_count, 1);
+ atomic_long_set(pp_ref_count, 1);
return 0;
}
- ret = atomic_long_sub_return(nr, &page->pp_ref_count);
+ ret = atomic_long_sub_return(nr, pp_ref_count);
WARN_ON(ret < 0);
/* We are the last user here too, reset pp_ref_count back to 1 to
@@ -282,7 +282,7 @@ static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
* page_pool_unref_page() currently.
*/
if (unlikely(!ret))
- atomic_long_set(&page->pp_ref_count, 1);
+ atomic_long_set(pp_ref_count, 1);
return ret;
}
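For context, this is the pairing the two helpers implement, as a driver might use it. The alloc source of netmem and the final put helper shown here are assumptions for illustration, not part of this diff:

	/* Producer splits one pool-allocated netmem into two references;
	 * each consumer drops one, and whoever sees
	 * page_pool_unref_netmem() return 0 owns the final put.
	 */
	page_pool_fragment_netmem(netmem, 2);

	/* ... hand the two fragments to two consumers ... */

	if (page_pool_unref_netmem(netmem, 1) == 0)
		page_pool_put_unrefed_netmem(pool, netmem, -1, false);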
@@ -401,9 +401,7 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
{
- struct page *page = netmem_to_page(netmem);
-
- dma_addr_t ret = page->dma_addr;
+ dma_addr_t ret = netmem_get_dma_addr(netmem);
if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
ret <<= PAGE_SHIFT;
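The getter's shift undoes a compression applied at store time. A worked example of the PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA case, with PAGE_SHIFT == 12:

	/* Store (done by the core when mapping): keep only the page frame
	 * number, since page-aligned addresses have zero low bits.
	 *
	 *   dma_addr = 0x123456000        (64-bit, page aligned)
	 *   stored   = dma_addr >> 12  == 0x123456, fits in 32 bits
	 *
	 * Load (this getter): shift the frame number back up.
	 *
	 *   ret = 0x123456 << 12       == 0x123456000
	 *
	 * An address that does not round-trip through the shift cannot be
	 * stored, which is why the setter (removed from this header below)
	 * returns true on failure in that case.
	 */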
@@ -423,24 +421,6 @@ static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page));
}
-static inline bool page_pool_set_dma_addr_netmem(netmem_ref netmem,
- dma_addr_t addr)
-{
- struct page *page = netmem_to_page(netmem);
-
- if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
- page->dma_addr = addr >> PAGE_SHIFT;
-
- /* We assume page alignment to shave off bottom bits,
- * if this "compression" doesn't work we need to drop.
- */
- return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
- }
-
- page->dma_addr = addr;
- return false;
-}
-
/**
* page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW
* @pool: &page_pool the @page belongs to
@@ -463,11 +443,6 @@ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
page_pool_get_dma_dir(pool));
}
-static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
-{
- return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
-}
-
static inline bool page_pool_put(struct page_pool *pool)
{
return refcount_dec_and_test(&pool->user_cnt);
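Note that the two DMA-address setters removed above are not deleted from the kernel; only drivers lose access to them. They are assumed to move to a page_pool-private header (net/core/page_pool_priv.h in this series), since establishing DMA mappings is the core's job, not a driver's:

/* Assumed relocation target; prototypes match the removed bodies. */
bool page_pool_set_dma_addr_netmem(netmem_ref netmem, dma_addr_t addr);
bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr);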
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 50569fed7868..c022c410abe3 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -20,8 +20,18 @@
* device driver responsibility
*/
#define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */
+
+/* Allow unreadable (net_iov backed) netmem in this page_pool. Drivers setting
+ * this must be able to support unreadable netmem, where netmem_address() would
+ * return NULL. This flag should not be set for header page_pools.
+ *
+ * If the driver sets PP_FLAG_ALLOW_UNREADABLE_NETMEM, it should also set
+ * page_pool_params.slow.queue_idx.
+ */
+#define PP_FLAG_ALLOW_UNREADABLE_NETMEM BIT(3)
+
#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
- PP_FLAG_SYSTEM_POOL)
+ PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM)
/*
* Fast allocation side cache array/stack
@@ -57,7 +67,9 @@ struct pp_alloc_cache {
* @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
* @slow: params with slowpath access only (initialization and Netlink)
* @netdev: netdev this pool will serve (leave as NULL if none or multiple)
- * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
+ * @queue_idx: queue index this page_pool is being created for.
+ * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL,
+ * PP_FLAG_ALLOW_UNREADABLE_NETMEM.
*/
struct page_pool_params {
struct_group_tagged(page_pool_params_fast, fast,
@@ -72,6 +84,7 @@ struct page_pool_params {
);
struct_group_tagged(page_pool_params_slow, slow,
struct net_device *netdev;
+ unsigned int queue_idx;
unsigned int flags;
/* private: used by test code only */
void (*init_callback)(netmem_ref netmem, void *arg);
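Putting the new flag and field together, a driver opting into unreadable netmem would fill the params roughly as follows. All struct fields come from the definition above; the surrounding dev/netdev/rxq_index variables are hypothetical driver context, not from this diff:

	struct page_pool_params pp_params = {
		.order		= 0,
		.pool_size	= 1024,		/* hypothetical ring depth */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,		/* needed for PP_FLAG_DMA_MAP */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,	/* needed for PP_FLAG_DMA_SYNC_DEV */
		.netdev		= netdev,
		.queue_idx	= rxq_index,	/* pairs with the flag below */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
				  PP_FLAG_ALLOW_UNREADABLE_NETMEM,
	};
	struct page_pool *pool = page_pool_create(&pp_params);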
@@ -139,6 +152,10 @@ struct page_pool_stats {
*/
#define PAGE_POOL_FRAG_GROUP_ALIGN (4 * sizeof(long))
+struct pp_memory_provider_params {
+ void *mp_priv;
+};
+
struct page_pool {
struct page_pool_params_fast p;
@@ -197,6 +214,8 @@ struct page_pool {
*/
struct ptr_ring ring;
+ void *mp_priv;
+
#ifdef CONFIG_PAGE_POOL_STATS
/* recycle stats are per-cpu to avoid locking */
struct page_pool_recycle_stats __percpu *recycle_stats;
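The mp_priv hook pairs with the pp_memory_provider_params struct added above: the core only carries an opaque pointer, and whatever it points at is defined entirely by the memory provider. A hypothetical provider-side shape, purely illustrative since this patch fixes nothing beyond the void * indirection:

/* Hypothetical provider state; the struct name and contents here are
 * invented for illustration, only pool->mp_priv itself is in this patch.
 */
struct dmabuf_provider_state {
	struct dma_buf_attachment *attach;	/* source of the net_iovs */
	/* ... provider-specific bookkeeping ... */
};

static void provider_attach(struct page_pool *pool,
			    struct dmabuf_provider_state *state)
{
	pool->mp_priv = state;	/* the core treats this as opaque */
}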