author     Tony Lindgren <[email protected]>  2018-08-28 09:58:03 -0700
committer  Tony Lindgren <[email protected]>  2018-08-28 09:58:03 -0700
commit     ea4d65f14f6aaa53e379b93c5544245ef081b3e7 (patch)
tree       a15485f4f1cf547a52b31fa8e16e14b9579b7200 /include/linux/shrinker.h
parent     ce32d59ee2cd036f6e8a6ed17a06a0b0bec5c67c (diff)
parent     496f3347d834aec91c38b45d6249ed00f58ad233 (diff)
Merge branch 'perm-fix' into omap-for-v4.19/fixes-v2
Diffstat (limited to 'include/linux/shrinker.h')
-rw-r--r--  include/linux/shrinker.h  21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 6794490f25b2..9443cafd1969 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -12,6 +12,9 @@
struct shrink_control {
gfp_t gfp_mask;
+ /* current node being shrunk (for NUMA aware shrinkers) */
+ int nid;
+
/*
* How many objects scan_objects should scan and try to reclaim.
* This is reset before every call, so it is safe for callees
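For illustration (not part of this patch): a NUMA-aware shrinker consults sc->nid to restrict its work to the node currently being shrunk. A minimal sketch of such a count callback, assuming a hypothetical cache backed by a struct list_lru named demo_lru:

#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru demo_lru;	/* hypothetical per-node LRU backing the cache */

static unsigned long demo_node_count(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	/* list_lru_shrink_count() reads sc->nid (and sc->memcg) internally,
	 * so only objects on the node currently being shrunk are counted. */
	return list_lru_shrink_count(&demo_lru, sc);
}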
@@ -26,20 +29,20 @@ struct shrink_control {
*/
unsigned long nr_scanned;
- /* current node being shrunk (for NUMA aware shrinkers) */
- int nid;
-
/* current memcg being shrunk (for memcg aware shrinkers) */
struct mem_cgroup *memcg;
};
#define SHRINK_STOP (~0UL)
+#define SHRINK_EMPTY (~0UL - 1)
/*
* A callback you can register to apply pressure to ageable caches.
*
* @count_objects should return the number of freeable items in the cache. If
- * there are no objects to free or the number of freeable items cannot be
- * determined, it should return 0. No deadlock checks should be done during the
+ * there are no objects to free, it should return SHRINK_EMPTY; 0 should be
+ * returned when the number of freeable items cannot be determined, or when
+ * the shrinker should skip this cache for now (e.g. because their number is
+ * below the shrinkable limit). No deadlock checks should be done during the
* count callback - the shrinker relies on aggregating scan counts that couldn't
* be executed due to potential deadlocks to be run at a later call when the
* deadlock condition is no longer pending.
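A minimal sketch of the return-value convention described in the comment above (demo_nr_cached, DEMO_BATCH and demo_evict() are hypothetical names, not part of this patch): count_objects returns SHRINK_EMPTY when the cache holds nothing, 0 when the count is unknown or the cache should be skipped this time, and the freeable count otherwise, while scan_objects returns SHRINK_STOP when it cannot reclaim in the current context:

static unsigned long demo_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = READ_ONCE(demo_nr_cached);	/* hypothetical counter */

	if (!nr)
		return SHRINK_EMPTY;	/* cache is empty */
	if (nr < DEMO_BATCH)
		return 0;		/* too few objects: skip this round */
	return nr;			/* number of freeable objects */
}

static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;	/* cannot reclaim safely in this context */

	/* demo_evict() is hypothetical; it returns the number of objects freed */
	return demo_evict(sc->nr_to_scan);
}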
@@ -60,12 +63,16 @@ struct shrinker {
unsigned long (*scan_objects)(struct shrinker *,
struct shrink_control *sc);
- int seeks; /* seeks to recreate an obj */
long batch; /* reclaim batch size, 0 = default */
- unsigned long flags;
+ int seeks; /* seeks to recreate an obj */
+ unsigned flags;
/* These are for internal use */
struct list_head list;
+#ifdef CONFIG_MEMCG_KMEM
+ /* ID in shrinker_idr */
+ int id;
+#endif
/* objs pending delete, per node */
atomic_long_t *nr_deferred;
};
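A minimal sketch of how a shrinker using these fields might be declared and registered, reusing the hypothetical demo_count()/demo_scan() callbacks sketched above; for memcg-aware shrinkers the id added under CONFIG_MEMCG_KMEM is assigned internally during registration:

#include <linux/module.h>
#include <linux/shrinker.h>

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
	.batch		= 0,	/* 0 = use the default reclaim batch size */
	.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

static int __init demo_init(void)
{
	return register_shrinker(&demo_shrinker);
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);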