author	Mauro Carvalho Chehab <mchehab@redhat.com>	2011-12-30 13:59:37 -0200
committer	Mauro Carvalho Chehab <mchehab@redhat.com>	2011-12-30 13:59:37 -0200
commit	b4d48c942c17ce3d3a330ad91e109e522bc97378
tree	3365292f3a5a502edb51492d011fd326c930ca40	/mm/vmscan.c
parent	1a5cd29631a6b75e49e6ad8a770ab9d69cda0fa2
parent	5f0a6e2d503896062f641639dacfe5055c2f593b
Merge tag 'v3.2-rc7' into staging/for_v3.3
Linux 3.2-rc7

* tag 'v3.2-rc7': (1304 commits)
  Linux 3.2-rc7
  netfilter: xt_connbytes: handle negation correctly
  Btrfs: call d_instantiate after all ops are setup
  Btrfs: fix worker lock misuse in find_worker
  net: relax rcvbuf limits
  rps: fix insufficient bounds checking in store_rps_dev_flow_table_cnt()
  net: introduce DST_NOPEER dst flag
  mqprio: Avoid panic if no options are provided
  bridge: provide a mtu() method for fake_dst_ops
  md/bitmap: It is OK to clear bits during recovery.
  md: don't give up looking for spares on first failure-to-add
  md/raid5: ensure correct assessment of drives during degraded reshape.
  md/linear: fix hot-add of devices to linear arrays.
  sparc64: Fix MSIQ HV call ordering in pci_sun4v_msiq_build_irq().
  pata_of_platform: Add missing CONFIG_OF_IRQ dependency.
  ipv4: using prefetch requires including prefetch.h
  VFS: Fix race between CPU hotplug and lglocks
  vfs: __read_cache_page should use gfp argument rather than GFP_KERNEL
  USB: Fix usb/isp1760 build on sparc
  net: Add a flow_cache_flush_deferred function
  ...

Conflicts:
	drivers/media/common/tuners/tda18218.c
	drivers/media/video/omap3isp/ispccdc.c
	drivers/staging/media/as102/as102_drv.h
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	| 26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a1893c050795..f54a05b7a61d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
*/
void register_shrinker(struct shrinker *shrinker)
{
- shrinker->nr = 0;
+ atomic_long_set(&shrinker->nr_in_batch, 0);
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
up_write(&shrinker_rwsem);
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
- unsigned long total_scan;
- unsigned long max_pass;
+ long total_scan;
+ long max_pass;
int shrink_ret = 0;
long nr;
long new_nr;
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;

+ max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+ if (max_pass <= 0)
+ continue;
+
/*
* copy the current shrinker scan count into a local variable
* and zero it so that other concurrent shrinker invocations
* don't also do this scanning work.
*/
- do {
- nr = shrinker->nr;
- } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+ nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
total_scan = nr;
- max_pass = do_shrinker_shrink(shrinker, shrink, 0);
delta = (4 * nr_pages_scanned) / shrinker->seeks;
delta *= max_pass;
do_div(delta, lru_pages + 1);
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
* manner that handles concurrent updates. If we exhausted the
* scan, there is no need to do an update.
*/
- do {
- nr = shrinker->nr;
- new_nr = total_scan + nr;
- if (total_scan <= 0)
- break;
- } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+ if (total_scan > 0)
+ new_nr = atomic_long_add_return(total_scan,
+ &shrinker->nr_in_batch);
+ else
+ new_nr = atomic_long_read(&shrinker->nr_in_batch);
trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
}
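
The substantive mm/vmscan.c change carried by this merge is the conversion of the shrinker's deferred-work counter from a plain "shrinker->nr", updated via open-coded cmpxchg() loops, to an atomic_long_t "shrinker->nr_in_batch": atomic_long_xchg() claims the whole deferred count in one step, and atomic_long_add_return() hands back whatever was left unscanned. It also picks up the fix that computes max_pass first and skips shrinkers that report nothing to reclaim (max_pass <= 0). What follows is a minimal userspace sketch of that claim/return pattern, using C11 atomics in place of the kernel's atomic_long_t API; the names pending_scan, claim_work() and return_unscanned() are illustrative, not taken from the kernel source.

/*
 * Minimal sketch of the shrinker batching pattern above, modelled with
 * C11 atomics instead of the kernel's atomic_long_t API. All names here
 * (pending_scan, claim_work, return_unscanned) are illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long pending_scan;        /* plays the role of shrinker->nr_in_batch */

/*
 * Claim all deferred work in a single step, as atomic_long_xchg() does
 * in shrink_slab(): concurrent callers can never grab the same count twice.
 */
static long claim_work(void)
{
	return atomic_exchange(&pending_scan, 0);
}

/*
 * Put back whatever was not scanned and return the new total, matching
 * the semantics of atomic_long_add_return().
 */
static long return_unscanned(long leftover)
{
	return atomic_fetch_add(&pending_scan, leftover) + leftover;
}

int main(void)
{
	atomic_store(&pending_scan, 100);   /* pretend 100 objects were deferred */

	long nr = claim_work();             /* this caller now owns all 100 */
	long scanned = 60;                  /* suppose only 60 were actually scanned */
	long new_nr = return_unscanned(nr - scanned);

	printf("claimed %ld, returned %ld, pending now %ld\n",
	       nr, nr - scanned, new_nr);
	return 0;
}

Compared with the old cmpxchg() loop, the xchg is a single unconditional atomic operation: two concurrent shrink_slab() callers cannot both observe and double-count the same deferred total, and there is no retry path under contention.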