Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      |   6
-rw-r--r--  lib/Kconfig.debug                |  48
-rw-r--r--  lib/Kconfig.kgdb                 |  58
-rw-r--r--  lib/Makefile                     |   3
-rw-r--r--  lib/bitmap.c                     | 174
-rw-r--r--  lib/find_next_bit.c              |  77
-rw-r--r--  lib/kernel_lock.c                |   1
-rw-r--r--  lib/kobject.c                    |  19
-rw-r--r--  lib/kobject_uevent.c             |   6
-rw-r--r--  lib/lmb.c                        | 101
-rw-r--r--  lib/pcounter.c                   |  58
-rw-r--r--  lib/radix-tree.c                 |   9
-rw-r--r--  lib/reed_solomon/reed_solomon.c  |   1
-rw-r--r--  lib/scatterlist.c                | 102
-rw-r--r--  lib/semaphore-sleepers.c         | 176
15 files changed, 552 insertions(+), 287 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 2d53dc092e8b..8cc8e8722a3f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -7,6 +7,12 @@ menu "Library routines"
config BITREVERSE
tristate
+config GENERIC_FIND_FIRST_BIT
+ def_bool n
+
+config GENERIC_FIND_NEXT_BIT
+ def_bool n
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0796c1a090c0..754cc0027f2a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -25,6 +25,17 @@ config ENABLE_MUST_CHECK
suppress the "warning: ignoring return value of 'foo', declared with
attribute warn_unused_result" messages.
+config FRAME_WARN
+ int "Warn for stack frames larger than (needs gcc 4.4)"
+ range 0 8192
+ default 1024 if !64BIT
+ default 2048 if 64BIT
+ help
+ Tell gcc to warn at build time for stack frames larger than this.
+ Setting this too low will cause a lot of warnings.
+ Setting it to 0 disables the warning.
+ Requires gcc 4.4.
+
config MAGIC_SYSRQ
bool "Magic SysRq key"
depends on !UML
@@ -211,7 +222,7 @@ config SLUB_DEBUG_ON
config SLUB_STATS
default n
bool "Enable SLUB performance statistics"
- depends on SLUB
+ depends on SLUB && SLUB_DEBUG && SYSFS
help
SLUB statistics are useful to debug SLUB's allocation behavior in
order to find ways to optimize the allocator. This should never be
@@ -265,16 +276,6 @@ config DEBUG_MUTEXES
This feature allows mutex semantics violations to be detected and
reported.
-config DEBUG_SEMAPHORE
- bool "Semaphore debugging"
- depends on DEBUG_KERNEL
- depends on ALPHA || FRV
- default n
- help
- If you say Y here then semaphore processing will issue lots of
- verbose debugging messages. If you suspect a semaphore problem or a
- kernel hacker asks for this option then say Y. Otherwise say N.
-
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -437,6 +438,16 @@ config DEBUG_VM
If unsure, say N.
+config DEBUG_WRITECOUNT
+ bool "Debug filesystem writers count"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to catch wrong use of the writers count in struct
+ vfsmount. This will increase the size of each file struct by
+ 32 bits.
+
+ If unsure, say N.
+
config DEBUG_LIST
bool "Debug linked list manipulation"
depends on DEBUG_KERNEL
@@ -593,7 +604,7 @@ config LATENCYTOP
to find out which userspace is blocking on what kernel operations.
config PROVIDE_OHCI1394_DMA_INIT
- bool "Provide code for enabling DMA over FireWire early on boot"
+ bool "Remote debugging over FireWire early on boot"
depends on PCI && X86
help
If you want to debug problems which hang or crash the kernel early
@@ -621,4 +632,17 @@ config PROVIDE_OHCI1394_DMA_INIT
See Documentation/debugging-via-ohci1394.txt for more information.
+config FIREWIRE_OHCI_REMOTE_DMA
+ bool "Remote debugging over FireWire with firewire-ohci"
+ depends on FIREWIRE_OHCI
+ help
+ This option lets you use the FireWire bus for remote debugging
+ with the help of the firewire-ohci driver. It enables unfiltered
+ remote DMA in firewire-ohci.
+ See Documentation/debugging-via-ohci1394.txt for more information.
+
+ If unsure, say N.
+
source "samples/Kconfig"
+
+source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
new file mode 100644
index 000000000000..f2e01ac5ab09
--- /dev/null
+++ b/lib/Kconfig.kgdb
@@ -0,0 +1,58 @@
+
+menuconfig KGDB
+ bool "KGDB: kernel debugging with remote gdb"
+ select FRAME_POINTER
+ depends on HAVE_ARCH_KGDB
+ depends on DEBUG_KERNEL && EXPERIMENTAL
+ help
+ If you say Y here, it will be possible to remotely debug the
+ kernel using gdb. Documentation of the kernel debugger is available
+ at http://kgdb.sourceforge.net as well as in DocBook form
+ in Documentation/DocBook/. If unsure, say N.
+
+config HAVE_ARCH_KGDB_SHADOW_INFO
+ bool
+
+config HAVE_ARCH_KGDB
+ bool
+
+config KGDB_SERIAL_CONSOLE
+ tristate "KGDB: use kgdb over the serial console"
+ depends on KGDB
+ select CONSOLE_POLL
+ select MAGIC_SYSRQ
+ default y
+ help
+ Share a serial console with kgdb. Sysrq-g must be used
+ to break in initially.
+
+config KGDB_TESTS
+ bool "KGDB: internal test suite"
+ depends on KGDB
+ default n
+ help
+ This is a kgdb I/O module specifically designed to test
+ kgdb's internal functions. This kgdb I/O module is
+ intended for the development of new kgdb stubs
+ as well as regression testing the kgdb internals.
+ See drivers/misc/kgdbts.c for details about
+ the tests. The most basic use of this I/O module is to boot
+ a kernel with the boot arguments "kgdbwait kgdbts=V1F100".
+
+config KGDB_TESTS_ON_BOOT
+ bool "KGDB: Run tests on boot"
+ depends on KGDB_TESTS
+ default n
+ help
+ Run the kgdb tests automatically on boot, without the need
+ to pass in a kernel parameter.
+
+config KGDB_TESTS_BOOT_STRING
+ string "KGDB: which internal kgdb tests to run"
+ depends on KGDB_TESTS_ON_BOOT
+ default "V1F100"
+ help
+ This is the command string to send the kgdb test suite on
+ boot. See drivers/misc/kgdbts.c for detailed
+ information about other strings you could use beyond the
+ default of V1F100.
diff --git a/lib/Makefile b/lib/Makefile
index 61bba16a0a2f..2d7001b7f5a4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
+lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
@@ -61,7 +61,6 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
-obj-$(CONFIG_SMP) += pcounter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 2c9242e3fed0..c4cb48f77f0c 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,6 +316,22 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
EXPORT_SYMBOL(bitmap_scnprintf);
/**
+ * bitmap_scnprintf_len - return buffer length needed to convert
+ * bitmap to an ASCII hex string.
+ * @len: number of bits to be converted
+ */
+int bitmap_scnprintf_len(unsigned int len)
+{
+ /* we need 9 chars per word for 32 bit words (8 hexdigits + sep/null) */
+ int bitslen = ALIGN(len, CHUNKSZ);
+ int wordlen = CHUNKSZ / 4;
+ int buflen = (bitslen / wordlen) * (wordlen + 1) * sizeof(char);
+
+ return buflen;
+}
+EXPORT_SYMBOL(bitmap_scnprintf_len);
+
+/**
* __bitmap_parse - convert an ASCII hex string into a bitmap.
* @buf: pointer to buffer containing string.
* @buflen: buffer size in bytes. If string is smaller than this
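
As a usage sketch (editor's illustration, not part of this commit): the new length helper can size a print buffer before calling bitmap_scnprintf(); the bitmap_to_hex() wrapper below is hypothetical.

    #include <linux/bitmap.h>
    #include <linux/slab.h>

    /* Allocate and fill a hex dump of an nbits-wide bitmap. */
    static char *bitmap_to_hex(const unsigned long *map, unsigned int nbits)
    {
            int buflen = bitmap_scnprintf_len(nbits);
            char *buf = kmalloc(buflen, GFP_KERNEL);

            if (buf)
                    bitmap_scnprintf(buf, buflen, map, nbits);
            return buf;
    }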
@@ -698,6 +714,164 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
}
EXPORT_SYMBOL(bitmap_bitremap);
+/**
+ * bitmap_onto - translate one bitmap relative to another
+ * @dst: resulting translated bitmap
+ * @orig: original untranslated bitmap
+ * @relmap: bitmap relative to which translated
+ * @bits: number of bits in each of these bitmaps
+ *
+ * Set the n-th bit of @dst iff there exists some m such that the
+ * n-th bit of @relmap is set, the m-th bit of @orig is set, and
+ * the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
+ * (If you understood the previous sentence the first time you
+ * read it, you're overqualified for your current job.)
+ *
+ * In other words, @orig is mapped onto (surjectively) @dst,
+ * using the map { <n, m> | the n-th bit of @relmap is the
+ * m-th set bit of @relmap }.
+ *
+ * Any set bits in @orig above bit number W, where W is the
+ * weight of (number of set bits in) @relmap, are mapped nowhere.
+ * In particular, if for all bits m set in @orig, m >= W, then
+ * @dst will end up empty. In situations where the possibility
+ * of such an empty result is not desired, one way to avoid it is
+ * to use the bitmap_fold() operator, below, to first fold the
+ * @orig bitmap over itself so that all its set bits x are in the
+ * range 0 <= x < W. The bitmap_fold() operator does this by
+ * setting the bit (m % W) in @dst, for each bit (m) set in @orig.
+ *
+ * Example [1] for bitmap_onto():
+ * Let's say @relmap has bits 30-39 set, and @orig has bits
+ * 1, 3, 5, 7, 9 and 11 set. Then on return from this routine,
+ * @dst will have bits 31, 33, 35, 37 and 39 set.
+ *
+ * When bit 0 is set in @orig, it means turn on the bit in
+ * @dst corresponding to whatever is the first bit (if any)
+ * that is turned on in @relmap. Since bit 0 was off in the
+ * above example, we leave off that bit (bit 30) in @dst.
+ *
+ * When bit 1 is set in @orig (as in the above example), it
+ * means turn on the bit in @dst corresponding to whatever
+ * is the second bit that is turned on in @relmap. The second
+ * bit in @relmap that was turned on in the above example was
+ * bit 31, so we turned on bit 31 in @dst.
+ *
+ * Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
+ * because they were the 4th, 6th, 8th and 10th set bits
+ * set in @relmap, and the 4th, 6th, 8th and 10th bits of
+ * @orig (i.e. bits 3, 5, 7 and 9) were also set.
+ *
+ * When bit 11 is set in @orig, it means turn on the bit in
+ * @dst corresponding to whatever is the twelfth bit that is
+ * turned on in @relmap. In the above example, there were
+ * only ten bits turned on in @relmap (30..39), so the fact that
+ * bit 11 was set in @orig had no effect on @dst.
+ *
+ * Example [2] for bitmap_fold() + bitmap_onto():
+ * Let's say @relmap has these ten bits set:
+ * 40 41 42 43 45 48 53 61 74 95
+ * (for the curious, that's 40 plus the first ten terms of the
+ * Fibonacci sequence.)
+ *
+ * Further, let's say we use the following code, invoking
+ * bitmap_fold() then bitmap_onto(), as suggested above to
+ * avoid the possibility of an empty @dst result:
+ *
+ * unsigned long *tmp; // a temporary bitmap's bits
+ *
+ * bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
+ * bitmap_onto(dst, tmp, relmap, bits);
+ *
+ * Then this table shows what various values of @dst would be, for
+ * various @orig's. I list the zero-based positions of each set bit.
+ * The tmp column shows the intermediate result, as computed by
+ * using bitmap_fold() to fold the @orig bitmap modulo ten
+ * (the weight of @relmap).
+ *
+ * @orig tmp @dst
+ * 0 0 40
+ * 1 1 41
+ * 9 9 95
+ * 10 0 40 (*)
+ * 1 3 5 7 1 3 5 7 41 43 48 61
+ * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45
+ * 0 9 18 27 0 9 8 7 40 61 74 95
+ * 0 10 20 30 0 40
+ * 0 11 22 33 0 1 2 3 40 41 42 43
+ * 0 12 24 36 0 2 4 6 40 42 45 53
+ * 78 102 211 1 2 8 41 42 74 (*)
+ *
+ * (*) For these marked lines, if we hadn't first done bitmap_fold()
+ * into tmp, then the @dst result would have been empty.
+ *
+ * If either of @orig or @relmap is empty (no set bits), then @dst
+ * will be returned empty.
+ *
+ * If (as explained above) the only set bits in @orig are in positions
+ * m where m >= W, (where W is the weight of @relmap) then @dst will
+ * once again be returned empty.
+ *
+ * All bits in @dst not set by the above rule are cleared.
+ */
+void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+ const unsigned long *relmap, int bits)
+{
+ int n, m; /* same meaning as in above comment */
+
+ if (dst == orig) /* following doesn't handle inplace mappings */
+ return;
+ bitmap_zero(dst, bits);
+
+ /*
+ * The following code is a more efficient, but less
+ * obvious, equivalent to the loop:
+ * for (m = 0; m < bitmap_weight(relmap, bits); m++) {
+ * n = bitmap_ord_to_pos(orig, m, bits);
+ * if (test_bit(m, orig))
+ * set_bit(n, dst);
+ * }
+ */
+
+ m = 0;
+ for (n = find_first_bit(relmap, bits);
+ n < bits;
+ n = find_next_bit(relmap, bits, n + 1)) {
+ /* m == bitmap_pos_to_ord(relmap, n, bits) */
+ if (test_bit(m, orig))
+ set_bit(n, dst);
+ m++;
+ }
+}
+EXPORT_SYMBOL(bitmap_onto);
+
+/**
+ * bitmap_fold - fold larger bitmap into smaller, modulo specified size
+ * @dst: resulting smaller bitmap
+ * @orig: original larger bitmap
+ * @sz: specified size
+ * @bits: number of bits in each of these bitmaps
+ *
+ * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
+ * Clear all other bits in @dst. See further the comment and
+ * Example [2] for bitmap_onto() for why and how to use this.
+ */
+void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+ int sz, int bits)
+{
+ int oldbit;
+
+ if (dst == orig) /* following doesn't handle inplace mappings */
+ return;
+ bitmap_zero(dst, bits);
+
+ for (oldbit = find_first_bit(orig, bits);
+ oldbit < bits;
+ oldbit = find_next_bit(orig, bits, oldbit + 1))
+ set_bit(oldbit % sz, dst);
+}
+EXPORT_SYMBOL(bitmap_fold);
+
/*
* Common code for bitmap_*_region() routines.
* bitmap: array of unsigned longs corresponding to the bitmap
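
A usage sketch of the two new primitives together (editor's illustration, not part of this commit), following Example [2] in the bitmap_onto() comment; NBITS and the function name are hypothetical.

    #include <linux/bitmap.h>

    #define NBITS 128

    /* Remap orig onto relmap, first folding so no set bit maps nowhere. */
    static void remap_onto(unsigned long *dst, const unsigned long *orig,
                           const unsigned long *relmap)
    {
            DECLARE_BITMAP(tmp, NBITS);

            bitmap_fold(tmp, orig, bitmap_weight(relmap, NBITS), NBITS);
            bitmap_onto(dst, tmp, relmap, NBITS);
    }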
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index 78ccd73a8841..d3f5784807b4 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -16,14 +16,12 @@
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
+/*
+ * Find the next set bit in a memory region.
*/
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+unsigned long __find_next_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -60,15 +58,14 @@ found_first:
found_middle:
return result + __ffs(tmp);
}
-
-EXPORT_SYMBOL(find_next_bit);
+EXPORT_SYMBOL(__find_next_bit);
/*
* This implementation of find_{first,next}_zero_bit was stolen from
* Linus' asm-alpha/bitops.h.
*/
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+unsigned long __find_next_zero_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
const unsigned long *p = addr + BITOP_WORD(offset);
unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -105,8 +102,64 @@ found_first:
found_middle:
return result + ffz(tmp);
}
+EXPORT_SYMBOL(__find_next_zero_bit);
+#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
+
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long __find_first_bit(const unsigned long *addr,
+ unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
-EXPORT_SYMBOL(find_next_zero_bit);
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(__find_first_bit);
+
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long __find_first_zero_bit(const unsigned long *addr,
+ unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if (~(tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) | (~0UL << size);
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found:
+ return result + ffz(tmp);
+}
+EXPORT_SYMBOL(__find_first_zero_bit);
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
#ifdef __BIG_ENDIAN
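
For reference (editor's illustration, not part of this commit), the canonical caller pattern these generic helpers sit behind, iterating all set bits of a bitmap:

    #include <linux/bitops.h>
    #include <linux/kernel.h>

    static void visit_set_bits(const unsigned long *map, unsigned long size)
    {
            unsigned long bit;

            for (bit = find_first_bit(map, size);
                 bit < size;
                 bit = find_next_bit(map, size, bit + 1))
                    pr_debug("bit %lu is set\n", bit);
    }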
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 812dbf00844b..cd3e82530b03 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,6 +8,7 @@
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
+#include <linux/semaphore.h>
/*
* The 'big kernel semaphore'
diff --git a/lib/kobject.c b/lib/kobject.c
index 0d03252f87a8..2c6490370922 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -58,11 +58,6 @@ static int create_dir(struct kobject *kobj)
return error;
}
-static inline struct kobject *to_kobj(struct list_head *entry)
-{
- return container_of(entry, struct kobject, entry);
-}
-
static int get_kobj_path_length(struct kobject *kobj)
{
int length = 1;
@@ -592,8 +587,15 @@ static void kobject_release(struct kref *kref)
*/
void kobject_put(struct kobject *kobj)
{
- if (kobj)
+ if (kobj) {
+ if (!kobj->state_initialized) {
+ printk(KERN_WARNING "kobject: '%s' (%p): is not "
+ "initialized, yet kobject_put() is being "
+ "called.\n", kobject_name(kobj), kobj);
+ WARN_ON(1);
+ }
kref_put(&kobj->kref, kobject_release);
+ }
}
static void dynamic_kobj_release(struct kobject *kobj)
@@ -745,12 +747,11 @@ void kset_unregister(struct kset *k)
*/
struct kobject *kset_find_obj(struct kset *kset, const char *name)
{
- struct list_head *entry;
+ struct kobject *k;
struct kobject *ret = NULL;
spin_lock(&kset->list_lock);
- list_for_each(entry, &kset->list) {
- struct kobject *k = to_kobj(entry);
+ list_for_each_entry(k, &kset->list, entry) {
if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
ret = kobject_get(k);
break;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5b6d7f6956b9..9fb6b86cf6b1 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -15,11 +15,13 @@
*/
#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
-#include <linux/string.h>
-#include <linux/kobject.h>
#include <net/sock.h>
diff --git a/lib/lmb.c b/lib/lmb.c
index 896e2832099e..83287d3869a3 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -46,14 +46,13 @@ void lmb_dump_all(void)
#endif /* DEBUG */
}
-static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
- u64 base2, u64 size2)
+static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
+ u64 size2)
{
return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
-static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
- u64 base2, u64 size2)
+static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
{
if (base2 == base1 + size1)
return 1;
@@ -63,7 +62,7 @@ static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
return 0;
}
-static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+static long lmb_regions_adjacent(struct lmb_region *rgn,
unsigned long r1, unsigned long r2)
{
u64 base1 = rgn->region[r1].base;
@@ -74,7 +73,7 @@ static long __init lmb_regions_adjacent(struct lmb_region *rgn,
return lmb_addrs_adjacent(base1, size1, base2, size2);
}
-static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
unsigned long i;
@@ -86,7 +85,7 @@ static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
}
/* Assumption: base addr of region 1 < base addr of region 2 */
-static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+static void lmb_coalesce_regions(struct lmb_region *rgn,
unsigned long r1, unsigned long r2)
{
rgn->region[r1].size += rgn->region[r2].size;
@@ -118,7 +117,7 @@ void __init lmb_analyze(void)
lmb.memory.size += lmb.memory.region[i].size;
}
-static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
+static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
{
unsigned long coalesced = 0;
long adjacent, i;
@@ -182,7 +181,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
return 0;
}
-long __init lmb_add(u64 base, u64 size)
+long lmb_add(u64 base, u64 size)
{
struct lmb_region *_rgn = &lmb.memory;
@@ -194,6 +193,55 @@ long __init lmb_add(u64 base, u64 size)
}
+long lmb_remove(u64 base, u64 size)
+{
+ struct lmb_region *rgn = &(lmb.memory);
+ u64 rgnbegin, rgnend;
+ u64 end = base + size;
+ int i;
+
+ rgnbegin = rgnend = 0; /* suppress gcc warnings */
+
+ /* Find the region that contains (base, size) */
+ for (i=0; i < rgn->cnt; i++) {
+ rgnbegin = rgn->region[i].base;
+ rgnend = rgnbegin + rgn->region[i].size;
+
+ if ((rgnbegin <= base) && (end <= rgnend))
+ break;
+ }
+
+ /* Didn't find the region */
+ if (i == rgn->cnt)
+ return -1;
+
+ /* Check to see if we are removing the entire region */
+ if ((rgnbegin == base) && (rgnend == end)) {
+ lmb_remove_region(rgn, i);
+ return 0;
+ }
+
+ /* Check to see if the region matches at the front */
+ if (rgnbegin == base) {
+ rgn->region[i].base = end;
+ rgn->region[i].size -= size;
+ return 0;
+ }
+
+ /* Check to see if the region matches at the end */
+ if (rgnend == end) {
+ rgn->region[i].size -= size;
+ return 0;
+ }
+
+ /*
+ * We need to split the entry - adjust the current one to the
+ * beginning of the hole and add a new region after the hole.
+ */
+ rgn->region[i].size = base - rgn->region[i].base;
+ return lmb_add_region(rgn, end, rgnend - end);
+}
+
long __init lmb_reserve(u64 base, u64 size)
{
struct lmb_region *_rgn = &lmb.reserved;
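
A sketch of the new lmb_remove() in use (editor's illustration, not part of this commit; the addresses are made up): punching a hole in the middle of a registered region exercises the split path above.

    /* Register 256MB at 0, then carve out 16MB at the 64MB mark;
     * lmb_remove() splits the region into [0, 64MB) and [80MB, 256MB). */
    lmb_add(0x00000000, 0x10000000);
    lmb_remove(0x04000000, 0x01000000);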
@@ -346,7 +394,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
if (j < 0) {
/* this area isn't reserved, take it */
if (lmb_add_region(&lmb.reserved, base,
- size) < 0)
+ lmb_align_up(size, align)) < 0)
return 0;
return base;
}
@@ -426,3 +474,36 @@ int __init lmb_is_reserved(u64 addr)
}
return 0;
}
+
+/*
+ * Given a <base, len>, find which memory regions belong to this range.
+ * Adjust the request and return a contiguous chunk.
+ */
+int lmb_find(struct lmb_property *res)
+{
+ int i;
+ u64 rstart, rend;
+
+ rstart = res->base;
+ rend = rstart + res->size - 1;
+
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ u64 start = lmb.memory.region[i].base;
+ u64 end = start + lmb.memory.region[i].size - 1;
+
+ if (start > rend)
+ return -1;
+
+ if ((end >= rstart) && (start < rend)) {
+ /* adjust the request */
+ if (rstart < start)
+ rstart = start;
+ if (rend > end)
+ rend = end;
+ res->base = rstart;
+ res->size = rend - rstart + 1;
+ return 0;
+ }
+ }
+ return -1;
+}
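
A sketch of lmb_find() in use (editor's illustration, not part of this commit; the requested range is hypothetical): on success the request is clipped to the memory region that contains it.

    struct lmb_property res = {
            .base = 0x0f000000,
            .size = 0x02000000,
    };

    if (lmb_find(&res) == 0)
            pr_info("usable: 0x%llx-0x%llx\n",
                    (unsigned long long)res.base,
                    (unsigned long long)(res.base + res.size - 1));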
diff --git a/lib/pcounter.c b/lib/pcounter.c
deleted file mode 100644
index 9b56807da93b..000000000000
--- a/lib/pcounter.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Define default pcounter functions
- * Note that often used pcounters use dedicated functions to get a speed increase.
- * (see DEFINE_PCOUNTER/REF_PCOUNTER_MEMBER)
- */
-
-#include <linux/module.h>
-#include <linux/pcounter.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-
-static void pcounter_dyn_add(struct pcounter *self, int inc)
-{
- per_cpu_ptr(self->per_cpu_values, smp_processor_id())[0] += inc;
-}
-
-static int pcounter_dyn_getval(const struct pcounter *self, int cpu)
-{
- return per_cpu_ptr(self->per_cpu_values, cpu)[0];
-}
-
-int pcounter_getval(const struct pcounter *self)
-{
- int res = 0, cpu;
-
- for_each_possible_cpu(cpu)
- res += self->getval(self, cpu);
-
- return res;
-}
-EXPORT_SYMBOL_GPL(pcounter_getval);
-
-int pcounter_alloc(struct pcounter *self)
-{
- int rc = 0;
- if (self->add == NULL) {
- self->per_cpu_values = alloc_percpu(int);
- if (self->per_cpu_values != NULL) {
- self->add = pcounter_dyn_add;
- self->getval = pcounter_dyn_getval;
- } else
- rc = 1;
- }
- return rc;
-}
-EXPORT_SYMBOL_GPL(pcounter_alloc);
-
-void pcounter_free(struct pcounter *self)
-{
- if (self->per_cpu_values != NULL) {
- free_percpu(self->per_cpu_values);
- self->per_cpu_values = NULL;
- self->getval = NULL;
- self->add = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(pcounter_free);
-
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 65f0e758ec38..bd521716ab1a 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -114,8 +114,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
}
}
if (ret == NULL)
- ret = kmem_cache_alloc(radix_tree_node_cachep,
- set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
BUG_ON(radix_tree_is_indirect_ptr(ret));
return ret;
@@ -150,8 +149,7 @@ int radix_tree_preload(gfp_t gfp_mask)
rtp = &__get_cpu_var(radix_tree_preloads);
while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
preempt_enable();
- node = kmem_cache_alloc(radix_tree_node_cachep,
- set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+ node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
preempt_disable();
@@ -1098,7 +1096,8 @@ void __init radix_tree_init(void)
{
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
- SLAB_PANIC, radix_tree_node_ctor);
+ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+ radix_tree_node_ctor);
radix_tree_init_maxindex();
hotcpu_notifier(radix_tree_callback, 0);
}
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index 3ea2db94d5b0..06d04cfa9339 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -45,7 +45,6 @@
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <asm/semaphore.h>
/* This list holds all currently allocated rs control structures */
static LIST_HEAD (rslist);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index acca4901046c..b80c21100d78 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
#include <linux/scatterlist.h>
+#include <linux/highmem.h>
/**
* sg_next - return the next scatterlist entry in a list
@@ -292,3 +293,104 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
+
+/**
+ * sg_copy_buffer - Copy data between a linear buffer and an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ * @to_buffer: transfer direction (non-zero == from an sg list to a
+ * buffer, 0 == from a buffer to an sg list)
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen, int to_buffer)
+{
+ struct scatterlist *sg;
+ size_t buf_off = 0;
+ int i;
+
+ WARN_ON(!irqs_disabled());
+
+ for_each_sg(sgl, sg, nents, i) {
+ struct page *page;
+ int n = 0;
+ unsigned int sg_off = sg->offset;
+ unsigned int sg_copy = sg->length;
+
+ if (sg_copy > buflen)
+ sg_copy = buflen;
+ buflen -= sg_copy;
+
+ while (sg_copy > 0) {
+ unsigned int page_copy;
+ void *p;
+
+ page_copy = PAGE_SIZE - sg_off;
+ if (page_copy > sg_copy)
+ page_copy = sg_copy;
+
+ page = nth_page(sg_page(sg), n);
+ p = kmap_atomic(page, KM_BIO_SRC_IRQ);
+
+ if (to_buffer)
+ memcpy(buf + buf_off, p + sg_off, page_copy);
+ else {
+ memcpy(p + sg_off, buf + buf_off, page_copy);
+ flush_kernel_dcache_page(page);
+ }
+
+ kunmap_atomic(p, KM_BIO_SRC_IRQ);
+
+ buf_off += page_copy;
+ sg_off += page_copy;
+ if (sg_off == PAGE_SIZE) {
+ sg_off = 0;
+ n++;
+ }
+ sg_copy -= page_copy;
+ }
+
+ if (!buflen)
+ break;
+ }
+
+ return buf_off;
+}
+
+/**
+ * sg_copy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen)
+{
+ return sg_copy_buffer(sgl, nents, buf, buflen, 0);
+}
+EXPORT_SYMBOL(sg_copy_from_buffer);
+
+/**
+ * sg_copy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen)
+{
+ return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+}
+EXPORT_SYMBOL(sg_copy_to_buffer);
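
A usage sketch of the new copy helpers (editor's illustration, not part of this commit): round-tripping a linear buffer through a caller-provided SG list; sgl/nents are assumed to describe already-populated pages.

    #include <linux/scatterlist.h>

    /* Copy buf into the SG list, then read it back out. */
    static void sg_roundtrip(struct scatterlist *sgl, unsigned int nents,
                             void *buf, size_t len)
    {
            size_t copied;

            copied = sg_copy_from_buffer(sgl, nents, buf, len);
            sg_copy_to_buffer(sgl, nents, buf, copied);
    }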
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
deleted file mode 100644
index 0198782cdacb..000000000000
--- a/lib/semaphore-sleepers.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * i386 and x86-64 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- * - only on a boundary condition do we need to care. When we go
- * from a negative count to a non-negative, we wake people up.
- * - when we go from a non-negative count to a negative do we
- * (a) synchronize with the "sleeper" count and (b) make sure
- * that we're on the wakeup list before we synchronize so that
- * we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
- wake_up(&sem->wait);
-}
-
-void __sched __down(struct semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_UNINTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * the wait_queue_head.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_UNINTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
- tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
- int retval = 0;
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- unsigned long flags;
-
- tsk->state = TASK_INTERRUPTIBLE;
- spin_lock_irqsave(&sem->wait.lock, flags);
- add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * With signals pending, this turns into
- * the trylock failure case - we won't be
- * sleeping, and we can't get the lock as
- * it has contention. Just correct the count
- * and exit.
- */
- if (signal_pending(current)) {
- retval = -EINTR;
- sem->sleepers = 0;
- atomic_add(sleepers, &sem->count);
- break;
- }
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock in
- * wait_queue_head. The "-1" is because we're
- * still hoping to get the semaphore.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- schedule();
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- tsk->state = TASK_INTERRUPTIBLE;
- }
- remove_wait_queue_locked(&sem->wait, &wait);
- wake_up_locked(&sem->wait);
- spin_unlock_irqrestore(&sem->wait.lock, flags);
-
- tsk->state = TASK_RUNNING;
- return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore *sem)
-{
- int sleepers;
- unsigned long flags;
-
- spin_lock_irqsave(&sem->wait.lock, flags);
- sleepers = sem->sleepers + 1;
- sem->sleepers = 0;
-
- /*
- * Add "everybody else" and us into it. They aren't
- * playing, because we own the spinlock in the
- * wait_queue_head.
- */
- if (!atomic_add_negative(sleepers, &sem->count)) {
- wake_up_locked(&sem->wait);
- }
-
- spin_unlock_irqrestore(&sem->wait.lock, flags);
- return 1;
-}