mm: replace various uses of num_physpages by totalram_pages

Sizing of memory allocations shouldn't depend on the number of physical
pages found in a system, as that generally includes (perhaps a huge amount
of) non-RAM pages.  The amount of memory that is actually usable as
storage should be used as the basis instead.

Some of the calculations (i.e.  those not intending to use high memory)
should likely even use (totalram_pages - totalhigh_pages).
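
Purely as an illustration (not part of this patch, and using a made-up
helper name), the sizing idiom being converted looks roughly like the
sketch below; the point is simply that the table goal is derived from
totalram_pages rather than num_physpages:

#include <linux/mm.h>	/* totalram_pages, PAGE_SHIFT */

/* Hypothetical example, not taken from any of the files touched here. */
static unsigned long example_table_goal(void)
{
	unsigned long goal;

	if (totalram_pages >= (128 * 1024))	/* >= 512MB with 4k pages */
		goal = totalram_pages >> (21 - PAGE_SHIFT);	/* 1 entry per 2MB */
	else
		goal = totalram_pages >> (23 - PAGE_SHIFT);	/* 1 entry per 8MB */

	/*
	 * Calculations that never touch high memory could instead use
	 * (totalram_pages - totalhigh_pages), as noted above.
	 */
	return goal;
}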

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:     Jan Beulich
AuthorDate: 2009-09-21 17:03:05 -07:00
Committer:  Linus Torvalds
Parent:     4738e1b9cf
Commit:     4481374ce8

21 changed files with 38 additions and 41 deletions


@@ -210,8 +210,8 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
 {
 	ssize_t ret = -EINVAL;
 
-	if ((len >> PAGE_SHIFT) > num_physpages) {
-		pr_err("microcode: too much data (max %ld pages)\n", num_physpages);
+	if ((len >> PAGE_SHIFT) > totalram_pages) {
+		pr_err("microcode: too much data (max %ld pages)\n", totalram_pages);
 		return ret;
 	}


@@ -114,9 +114,9 @@ static int agp_find_max(void)
 	long memory, index, result;
 
 #if PAGE_SHIFT < 20
-	memory = num_physpages >> (20 - PAGE_SHIFT);
+	memory = totalram_pages >> (20 - PAGE_SHIFT);
 #else
-	memory = num_physpages << (PAGE_SHIFT - 20);
+	memory = totalram_pages << (PAGE_SHIFT - 20);
 #endif
 	index = 1;


@@ -1266,7 +1266,7 @@ ccio_ioc_init(struct ioc *ioc)
 	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
 	*/
 
-	iova_space_size = (u32) (num_physpages / count_parisc_driver(&ccio_driver));
+	iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver));
 
 	/* limit IOVA space size to 1MB-1GB */
@@ -1305,7 +1305,7 @@ ccio_ioc_init(struct ioc *ioc)
 	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
 			__func__, ioc->ioc_regs,
-			(unsigned long) num_physpages >> (20 - PAGE_SHIFT),
+			(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
 			iova_space_size>>20,
 			iov_order + PAGE_SHIFT);


@@ -1390,7 +1390,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	** for DMA hints - ergo only 30 bits max.
 	*/
 
-	iova_space_size = (u32) (num_physpages/global_ioc_cnt);
+	iova_space_size = (u32) (totalram_pages/global_ioc_cnt);
 
 	/* limit IOVA space size to 1MB-1GB */
 	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
@@ -1415,7 +1415,7 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
 			__func__,
 			ioc->ioc_hpa,
-			(unsigned long) num_physpages >> (20 - PAGE_SHIFT),
+			(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
 			iova_space_size>>20,
 			iov_order + PAGE_SHIFT);


@@ -96,11 +96,7 @@ static struct balloon_stats balloon_stats;
 /* We increase/decrease in batches which fit in a page */
 static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
 
-/* VM /proc information for memory */
-extern unsigned long totalram_pages;
-
 #ifdef CONFIG_HIGHMEM
-extern unsigned long totalhigh_pages;
 #define inc_totalhigh_pages() (totalhigh_pages++)
 #define dec_totalhigh_pages() (totalhigh_pages--)
 #else


@@ -47,7 +47,7 @@ static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)
 		return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);
 		/* return (void *)__get_free_page(gfp_mask); */
 	}
-	if (likely(size >> PAGE_SHIFT < num_physpages))
+	if (likely((size >> PAGE_SHIFT) < totalram_pages))
 		return __vmalloc(size, gfp_mask, PAGE_KERNEL);
 	return NULL;
 }


@@ -25,6 +25,7 @@ extern unsigned long max_mapnr;
 #endif
 
 extern unsigned long num_physpages;
+extern unsigned long totalram_pages;
 extern void * high_memory;
 extern int page_cluster;


@@ -668,12 +668,12 @@ asmlinkage void __init start_kernel(void)
 #endif
 	thread_info_cache_init();
 	cred_init();
-	fork_init(num_physpages);
+	fork_init(totalram_pages);
 	proc_caches_init();
 	buffer_init();
 	key_init();
 	security_init();
-	vfs_caches_init(num_physpages);
+	vfs_caches_init(totalram_pages);
 	radix_tree_init();
 	signals_init();
 	/* rootfs populating might need page-writeback */


@@ -1384,7 +1384,7 @@ void __init kmem_cache_init(void)
 	 * Fragmentation resistance on low memory - only use bigger
 	 * page orders on machines with more than 32MB of memory.
 	 */
-	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
+	if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
 		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
 
 	/* Bootstrap is tricky, because several objects are allocated


@@ -496,7 +496,7 @@ EXPORT_SYMBOL(pagevec_lookup_tag);
  */
 void __init swap_setup(void)
 {
-	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
+	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
 
 #ifdef CONFIG_SWAP
 	bdi_init(swapper_space.backing_dev_info);


@@ -1386,7 +1386,7 @@ void *vmap(struct page **pages, unsigned int count,
 
 	might_sleep();
 
-	if (count > num_physpages)
+	if (count > totalram_pages)
 		return NULL;
 
 	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
@@ -1493,7 +1493,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 	unsigned long real_size = size;
 
 	size = PAGE_ALIGN(size);
-	if (!size || (size >> PAGE_SHIFT) > num_physpages)
+	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
 	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,


@@ -1206,12 +1206,12 @@ EXPORT_SYMBOL_GPL(sk_setup_caps);
 
 void __init sk_init(void)
 {
-	if (num_physpages <= 4096) {
+	if (totalram_pages <= 4096) {
 		sysctl_wmem_max = 32767;
 		sysctl_rmem_max = 32767;
 		sysctl_wmem_default = 32767;
 		sysctl_rmem_default = 32767;
-	} else if (num_physpages >= 131072) {
+	} else if (totalram_pages >= 131072) {
 		sysctl_wmem_max = 131071;
 		sysctl_rmem_max = 131071;
 	}


@@ -1049,10 +1049,10 @@ static int __init dccp_init(void)
 	 *
 	 * The methodology is similar to that of the buffer cache.
 	 */
-	if (num_physpages >= (128 * 1024))
-		goal = num_physpages >> (21 - PAGE_SHIFT);
+	if (totalram_pages >= (128 * 1024))
+		goal = totalram_pages >> (21 - PAGE_SHIFT);
 	else
-		goal = num_physpages >> (23 - PAGE_SHIFT);
+		goal = totalram_pages >> (23 - PAGE_SHIFT);
 
 	if (thash_entries)
 		goal = (thash_entries *


@@ -1750,7 +1750,7 @@ void __init dn_route_init(void)
 	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
 	add_timer(&dn_route_timer);
 
-	goal = num_physpages >> (26 - PAGE_SHIFT);
+	goal = totalram_pages >> (26 - PAGE_SHIFT);
 
 	for(order = 0; (1UL << order) < goal; order++)
 		/* NOTHING */;


@@ -3414,7 +3414,7 @@ int __init ip_rt_init(void)
 		alloc_large_system_hash("IP route cache",
 					sizeof(struct rt_hash_bucket),
 					rhash_entries,
-					(num_physpages >= 128 * 1024) ?
+					(totalram_pages >= 128 * 1024) ?
 					15 : 17,
 					0,
 					&rt_hash_log,


@@ -2862,7 +2862,7 @@ void __init tcp_init(void)
 		alloc_large_system_hash("TCP established",
 					sizeof(struct inet_ehash_bucket),
 					thash_entries,
-					(num_physpages >= 128 * 1024) ?
+					(totalram_pages >= 128 * 1024) ?
 					13 : 15,
 					0,
 					&tcp_hashinfo.ehash_size,
@@ -2879,7 +2879,7 @@ void __init tcp_init(void)
 		alloc_large_system_hash("TCP bind",
 					sizeof(struct inet_bind_hashbucket),
 					tcp_hashinfo.ehash_size,
-					(num_physpages >= 128 * 1024) ?
+					(totalram_pages >= 128 * 1024) ?
 					13 : 15,
 					0,
 					&tcp_hashinfo.bhash_size,


@@ -1245,9 +1245,9 @@ static int nf_conntrack_init_init_net(void)
 	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
 	if (!nf_conntrack_htable_size) {
 		nf_conntrack_htable_size
-			= (((num_physpages << PAGE_SHIFT) / 16384)
+			= (((totalram_pages << PAGE_SHIFT) / 16384)
 			   / sizeof(struct hlist_head));
-		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
 			nf_conntrack_htable_size = 16384;
 		if (nf_conntrack_htable_size < 32)
 			nf_conntrack_htable_size = 32;


@@ -617,7 +617,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 	int cpu;
 
 	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
-	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
+	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
 		return NULL;
 
 	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);


@@ -194,9 +194,9 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
 	if (minfo->cfg.size)
 		size = minfo->cfg.size;
 	else {
-		size = ((num_physpages << PAGE_SHIFT) / 16384) /
+		size = ((totalram_pages << PAGE_SHIFT) / 16384) /
 		       sizeof(struct list_head);
-		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
 			size = 8192;
 		if (size < 16)
 			size = 16;
@@ -266,9 +266,9 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
 	if (minfo->cfg.size) {
 		size = minfo->cfg.size;
 	} else {
-		size = (num_physpages << PAGE_SHIFT) / 16384 /
+		size = (totalram_pages << PAGE_SHIFT) / 16384 /
 		       sizeof(struct list_head);
-		if (num_physpages > 1024 * 1024 * 1024 / PAGE_SIZE)
+		if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
 			size = 8192;
 		if (size < 16)
 			size = 16;


@@ -2091,10 +2091,10 @@ static int __init netlink_proto_init(void)
 	if (!nl_table)
 		goto panic;
 
-	if (num_physpages >= (128 * 1024))
-		limit = num_physpages >> (21 - PAGE_SHIFT);
+	if (totalram_pages >= (128 * 1024))
+		limit = totalram_pages >> (21 - PAGE_SHIFT);
 	else
-		limit = num_physpages >> (23 - PAGE_SHIFT);
+		limit = totalram_pages >> (23 - PAGE_SHIFT);
 
 	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
 	limit = (1UL << order) / sizeof(struct hlist_head);


@@ -1184,10 +1184,10 @@ SCTP_STATIC __init int sctp_init(void)
 	/* Size and allocate the association hash table.
 	 * The methodology is similar to that of the tcp hash tables.
 	 */
-	if (num_physpages >= (128 * 1024))
-		goal = num_physpages >> (22 - PAGE_SHIFT);
+	if (totalram_pages >= (128 * 1024))
+		goal = totalram_pages >> (22 - PAGE_SHIFT);
 	else
-		goal = num_physpages >> (24 - PAGE_SHIFT);
+		goal = totalram_pages >> (24 - PAGE_SHIFT);
 
 	for (order = 0; (1UL << order) < goal; order++)
 		;