Diffstat (limited to 'Documentation/RCU/rculist_nulls.rst')
-rw-r--r-- | Documentation/RCU/rculist_nulls.rst | 38
1 files changed, 27 insertions, 11 deletions
diff --git a/Documentation/RCU/rculist_nulls.rst b/Documentation/RCU/rculist_nulls.rst
index 9a734bf54b76..21e40fcc08de 100644
--- a/Documentation/RCU/rculist_nulls.rst
+++ b/Documentation/RCU/rculist_nulls.rst
@@ -18,7 +18,16 @@ to solve following problem.
 
 Without 'nulls', a typical RCU linked list managing objects which are
 allocated with SLAB_TYPESAFE_BY_RCU kmem_cache can use the following
-algorithms:
+algorithms. Following examples assume 'obj' is a pointer to such
+objects, which is having below type.
+
+::
+
+  struct object {
+    struct hlist_node obj_node;
+    atomic_t refcnt;
+    unsigned int key;
+  };
 
 1) Lookup algorithm
 -------------------
@@ -26,11 +35,13 @@ algorithms:
 ::
 
   begin:
-  rcu_read_lock()
+  rcu_read_lock();
   obj = lockless_lookup(key);
   if (obj) {
-    if (!try_get_ref(obj)) // might fail for free objects
+    if (!try_get_ref(obj)) { // might fail for free objects
+      rcu_read_unlock();
       goto begin;
+    }
     /*
      * Because a writer could delete object, and a writer could
      * reuse these object before the RCU grace period, we
@@ -54,7 +65,7 @@ but a version with an additional memory barrier (smp_rmb())
     struct hlist_node *node, *next;
     for (pos = rcu_dereference((head)->first);
          pos && ({ next = pos->next; smp_rmb(); prefetch(next); 1; }) &&
-         ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; });
+         ({ obj = hlist_entry(pos, typeof(*obj), obj_node); 1; });
          pos = rcu_dereference(next))
       if (obj->key == key)
         return obj;
@@ -66,10 +77,10 @@ And note the traditional hlist_for_each_entry_rcu() misses this smp_rmb()::
     struct hlist_node *node;
     for (pos = rcu_dereference((head)->first);
          pos && ({ prefetch(pos->next); 1; }) &&
-         ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; });
+         ({ obj = hlist_entry(pos, typeof(*obj), obj_node); 1; });
          pos = rcu_dereference(pos->next))
-    if (obj->key == key)
-      return obj;
+      if (obj->key == key)
+        return obj;
     return NULL;
 
 Quoting Corey Minyard::
@@ -86,7 +97,7 @@ Quoting Corey Minyard::
 2) Insertion algorithm
 ----------------------
 
-We need to make sure a reader cannot read the new 'obj->obj_next' value
+We need to make sure a reader cannot read the new 'obj->obj_node.next' value
 and previous value of 'obj->key'. Otherwise, an item could be deleted
 from a chain, and inserted into another chain. If new chain was empty
 before the move, 'next' pointer is NULL, and lockless reader can not
@@ -129,8 +140,7 @@ very very fast (before the end of RCU grace period)
 Avoiding extra smp_rmb()
 ========================
 
-With hlist_nulls we can avoid extra smp_rmb() in lockless_lookup()
-and extra _release() in insert function.
+With hlist_nulls we can avoid extra smp_rmb() in lockless_lookup().
 
 For example, if we choose to store the slot number as the 'nulls'
 end-of-list marker for each slot of the hash table, we can detect
@@ -142,6 +152,9 @@ the beginning. If the object was moved to the same chain,
 then the reader doesn't care: It might occasionally
 scan the list again without harm.
 
+Note that using hlist_nulls means the type of 'obj_node' field of
+'struct object' becomes 'struct hlist_nulls_node'.
+
 1) lookup algorithm
 -------------------
 
@@ -151,7 +164,7 @@ scan the list again without harm.
   head = &table[slot];
   begin:
   rcu_read_lock();
-  hlist_nulls_for_each_entry_rcu(obj, node, head, member) {
+  hlist_nulls_for_each_entry_rcu(obj, node, head, obj_node) {
     if (obj->key == key) {
       if (!try_get_ref(obj)) { // might fail for free objects
         rcu_read_unlock();
@@ -182,6 +195,9 @@ scan the list again without harm.
 2) Insert algorithm
 -------------------
 
+Same to the above one, but uses hlist_nulls_add_head_rcu() instead of
+hlist_add_head_rcu().
+
 ::
 
   /*
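
For context, here is a minimal C sketch (not part of the patch) of the hlist_nulls
lookup and insert pattern the updated document describes. The 'struct object'
layout, try_get_ref()/put_ref(), the nulls restart check, and the use of
hlist_nulls_for_each_entry_rcu()/hlist_nulls_add_head_rcu()/get_nulls_value()
follow the document; the table 'obj_table', TABLE_SIZE, 'obj_cachep', the
modulo hash, and the function names are illustrative assumptions. ::

  #include <linux/atomic.h>
  #include <linux/rculist_nulls.h>
  #include <linux/rcupdate.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>

  /* With hlist_nulls, the 'obj_node' field is a struct hlist_nulls_node. */
  struct object {
          struct hlist_nulls_node obj_node;
          atomic_t refcnt;
          unsigned int key;
  };

  /* Illustrative fixed-size table; the slot index doubles as the 'nulls' value. */
  #define TABLE_SIZE 256
  static struct hlist_nulls_head obj_table[TABLE_SIZE];
  static DEFINE_SPINLOCK(obj_table_lock);          /* writer-side chain lock */
  static struct kmem_cache *obj_cachep;            /* created with SLAB_TYPESAFE_BY_RCU */

  static void obj_table_init(void)
  {
          int i;

          /* Each slot stores its own index as the end-of-list 'nulls' marker. */
          for (i = 0; i < TABLE_SIZE; i++)
                  INIT_HLIST_NULLS_HEAD(&obj_table[i], i);
  }

  static bool try_get_ref(struct object *obj)
  {
          /* Fails once the refcount has dropped to zero (object being freed/reused). */
          return atomic_inc_not_zero(&obj->refcnt);
  }

  static void put_ref(struct object *obj)
  {
          if (atomic_dec_and_test(&obj->refcnt))
                  kmem_cache_free(obj_cachep, obj);
  }

  static struct object *obj_lookup(unsigned int key)
  {
          unsigned int slot = key % TABLE_SIZE;
          struct hlist_nulls_node *node;
          struct object *obj;

  begin:
          rcu_read_lock();
          hlist_nulls_for_each_entry_rcu(obj, node, &obj_table[slot], obj_node) {
                  if (obj->key != key)
                          continue;
                  if (!try_get_ref(obj)) {        /* might fail for free objects */
                          rcu_read_unlock();
                          goto begin;
                  }
                  if (obj->key != key) {          /* the slab reused this object */
                          put_ref(obj);
                          rcu_read_unlock();
                          goto begin;
                  }
                  rcu_read_unlock();
                  return obj;
          }
          /* The end-of-list 'nulls' value tells us which chain the scan ended on. */
          if (get_nulls_value(node) != slot) {
                  rcu_read_unlock();
                  goto begin;                     /* we drifted onto another chain */
          }
          rcu_read_unlock();
          return NULL;
  }

  static void obj_insert(struct object *obj, unsigned int key)
  {
          unsigned int slot = key % TABLE_SIZE;

          spin_lock(&obj_table_lock);
          obj->key = key;
          atomic_set_release(&obj->refcnt, 1);    /* make 'key' visible before 'refcnt' */
          hlist_nulls_add_head_rcu(&obj->obj_node, &obj_table[slot]);
          spin_unlock(&obj_table_lock);
  }

Storing the slot index as the nulls value is what lets a reader notice that its
scan ended on the wrong chain and restart, which is why the extra smp_rmb() of
lockless_lookup() can be dropped, as the patch's updated "Avoiding extra
smp_rmb()" section explains.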