-rw-r--r--  Documentation/cgroups/cpusets.txt | 12
-rw-r--r--  Documentation/gcov.txt | 25
-rw-r--r--  Documentation/kmemleak.txt | 23
-rw-r--r--  Documentation/spi/spidev_test.c | 10
-rw-r--r--  MAINTAINERS | 12
-rw-r--r--  arch/alpha/include/asm/percpu.h | 6
-rw-r--r--  arch/frv/include/asm/unistd.h | 4
-rw-r--r--  arch/frv/kernel/entry.S | 2
-rw-r--r--  arch/ia64/kernel/esi.c | 2
-rw-r--r--  arch/ia64/kernel/perfmon.c | 2
-rw-r--r--  arch/ia64/kernel/salinfo.c | 2
-rw-r--r--  arch/ia64/kvm/process.c | 6
-rw-r--r--  arch/ia64/kvm/vcpu.c | 2
-rw-r--r--  arch/ia64/kvm/vtlb.c | 4
-rw-r--r--  arch/ia64/sn/kernel/io_common.c | 3
-rw-r--r--  arch/mn10300/include/asm/unistd.h | 4
-rw-r--r--  arch/mn10300/kernel/entry.S | 2
-rw-r--r--  arch/powerpc/include/asm/perf_counter.h | 2
-rw-r--r--  arch/x86/include/asm/perf_counter.h | 3
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 22
-rw-r--r--  arch/x86/mm/init_64.c | 2
-rw-r--r--  drivers/block/floppy.c | 5
-rw-r--r--  drivers/edac/edac_core.h | 4
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 4
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 6
-rw-r--r--  drivers/edac/mpc85xx_edac.h | 1
-rw-r--r--  drivers/gpio/pl061.c | 20
-rw-r--r--  drivers/lguest/lg.h | 2
-rw-r--r--  drivers/lguest/lguest_user.c | 4
-rw-r--r--  drivers/md/dm-exception-store.c | 9
-rw-r--r--  drivers/md/dm-table.c | 2
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 6
-rw-r--r--  drivers/net/bnx2x_main.c | 10
-rw-r--r--  drivers/net/e1000/e1000_main.c | 11
-rw-r--r--  drivers/net/e1000e/netdev.c | 3
-rw-r--r--  drivers/net/igb/igb_main.c | 14
-rw-r--r--  drivers/net/irda/bfin_sir.c | 16
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 5
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 45
-rw-r--r--  drivers/net/usb/cdc_eem.c | 2
-rw-r--r--  drivers/net/usb/dm9601.c | 10
-rw-r--r--  drivers/net/usb/net1080.c | 12
-rw-r--r--  drivers/net/usb/rndis_host.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 10
-rw-r--r--  drivers/net/usb/usbnet.c | 30
-rw-r--r--  drivers/parport/parport_pc.c | 5
-rw-r--r--  drivers/rtc/rtc-bfin.c | 30
-rw-r--r--  drivers/serial/8250_pci.c | 6
-rw-r--r--  drivers/spi/omap_uwire.c | 2
-rw-r--r--  drivers/spi/spi_bitbang.c | 24
-rw-r--r--  drivers/spi/spidev.c | 17
-rw-r--r--  drivers/video/atafb.c | 7
-rw-r--r--  drivers/video/atmel_lcdfb.c | 2
-rw-r--r--  drivers/video/aty/atyfb.h | 3
-rw-r--r--  drivers/video/aty/atyfb_base.c | 141
-rw-r--r--  drivers/video/aty/mach64_accel.c | 7
-rw-r--r--  drivers/video/fbmem.c | 13
-rw-r--r--  drivers/video/fsl-diu-fb.c | 14
-rw-r--r--  drivers/video/i810/i810_main.c | 2
-rw-r--r--  drivers/video/matrox/matroxfb_base.c | 3
-rw-r--r--  drivers/video/matrox/matroxfb_crtc2.c | 5
-rw-r--r--  drivers/video/mx3fb.c | 17
-rw-r--r--  drivers/video/omap/omapfb_main.c | 4
-rw-r--r--  drivers/video/platinumfb.c | 2
-rw-r--r--  drivers/video/pxafb.c | 2
-rw-r--r--  drivers/video/sh7760fb.c | 19
-rw-r--r--  drivers/video/sis/sis_main.c | 2
-rw-r--r--  drivers/video/sm501fb.c | 21
-rw-r--r--  drivers/video/w100fb.c | 2
-rw-r--r--  fs/afs/flock.c | 1
-rw-r--r--  fs/aio.c | 24
-rw-r--r--  fs/binfmt_elf.c | 5
-rw-r--r--  fs/eventfd.c | 122
-rw-r--r--  fs/ext2/namei.c | 12
-rw-r--r--  fs/hostfs/hostfs_kern.c | 1
-rw-r--r--  include/asm-generic/percpu.h | 4
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 3
-rw-r--r--  include/linux/aio.h | 4
-rw-r--r--  include/linux/eventfd.h | 35
-rw-r--r--  include/linux/fb.h | 1
-rw-r--r--  include/linux/netfilter/xt_conntrack.h | 13
-rw-r--r--  include/linux/netfilter/xt_osf.h | 2
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/percpu-defs.h | 3
-rw-r--r--  include/linux/perf_counter.h | 46
-rw-r--r--  include/linux/sched.h | 16
-rw-r--r--  include/linux/spi/spi.h | 6
-rw-r--r--  include/linux/spi/spidev.h | 2
-rw-r--r--  include/linux/usb/usbnet.h | 1
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 4
-rw-r--r--  kernel/acct.c | 6
-rw-r--r--  kernel/perf_counter.c | 320
-rw-r--r--  kernel/pid.c | 7
-rw-r--r--  kernel/resource.c | 2
-rw-r--r--  lib/Kconfig.debug | 12
-rw-r--r--  mm/dmapool.c | 2
-rw-r--r--  mm/kmemleak.c | 197
-rw-r--r--  mm/page-writeback.c | 5
-rw-r--r--  mm/page_alloc.c | 13
-rw-r--r--  net/ieee802154/netlink.c | 6
-rw-r--r--  net/ipv4/arp.c | 7
-rw-r--r--  net/ipv4/fib_trie.c | 3
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c | 17
-rw-r--r--  net/ipv4/tcp.c | 15
-rw-r--r--  net/ipv4/tcp_output.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 6
-rw-r--r--  net/netfilter/xt_conntrack.c | 66
-rw-r--r--  net/sctp/output.c | 2
-rw-r--r--  net/xfrm/xfrm_state.c | 57
-rw-r--r--  scripts/pnmtologo.c | 4
-rw-r--r--  tools/perf/CREDITS | 30
-rw-r--r--  tools/perf/Documentation/perf-report.txt | 14
-rw-r--r--  tools/perf/Documentation/perf-stat.txt | 6
-rw-r--r--  tools/perf/Makefile | 6
-rw-r--r--  tools/perf/builtin-annotate.c | 8
-rw-r--r--  tools/perf/builtin-record.c | 127
-rw-r--r--  tools/perf/builtin-report.c | 236
-rw-r--r--  tools/perf/builtin-stat.c | 171
-rw-r--r--  tools/perf/builtin-top.c | 11
-rw-r--r--  tools/perf/perf.h | 9
-rw-r--r--  tools/perf/util/callchain.c | 174
-rw-r--r--  tools/perf/util/callchain.h | 33
-rw-r--r--  tools/perf/util/header.c | 242
-rw-r--r--  tools/perf/util/header.h | 37
-rw-r--r--  tools/perf/util/help.c | 15
-rw-r--r--  tools/perf/util/pager.c | 5
-rw-r--r--  tools/perf/util/parse-events.c | 153
-rw-r--r--  tools/perf/util/run-command.c | 95
-rw-r--r--  tools/perf/util/run-command.h | 5
-rw-r--r--  tools/perf/util/strbuf.c | 2
-rw-r--r--  tools/perf/util/string.h | 2
-rw-r--r--  tools/perf/util/strlist.c | 184
-rw-r--r--  tools/perf/util/strlist.h | 32
-rw-r--r--  tools/perf/util/symbol.c | 16
-rw-r--r--  tools/perf/util/symbol.h | 5
-rw-r--r--  tools/perf/util/types.h (renamed from tools/perf/types.h) | 0
-rw-r--r--  tools/perf/util/util.h | 15
139 files changed, 2530 insertions(+), 898 deletions(-)
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index f9ca389dddf4..1d7e9784439a 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -777,6 +777,18 @@ in cpuset directories:
# /bin/echo 1-4 > cpus -> set cpus list to cpus 1,2,3,4
# /bin/echo 1,2,3,4 > cpus -> set cpus list to cpus 1,2,3,4
+To add a CPU to a cpuset, write the new list of CPUs including the
+CPU to be added. To add 6 to the above cpuset:
+
+# /bin/echo 1-4,6 > cpus -> set cpus list to cpus 1,2,3,4,6
+
+Similarly to remove a CPU from a cpuset, write the new list of CPUs
+without the CPU to be removed.
+
+To remove all the CPUs:
+
+# /bin/echo "" > cpus -> clear cpus list
+
2.3 Setting flags
-----------------
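The cpusets.txt addition above is shell-oriented; from C the same operation is just a write of the full new CPU list to the cpuset's "cpus" file. A minimal sketch, assuming the cpuset hierarchy is mounted at /dev/cpuset and a cpuset named "example" already exists (both names are illustrative):

/* Sketch: add CPU 6 to an existing cpuset from C. The mount point and
 * cpuset name are illustrative assumptions, not from the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/dev/cpuset/example/cpus";
	const char *list = "1-4,6";	/* write the full new list, not just "6" */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, list, strlen(list)) < 0)
		perror("write");
	close(fd);
	return 0;
}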
diff --git a/Documentation/gcov.txt b/Documentation/gcov.txt
index e716aadb3a33..40ec63352760 100644
--- a/Documentation/gcov.txt
+++ b/Documentation/gcov.txt
@@ -188,13 +188,18 @@ Solution: Exclude affected source files from profiling by specifying
GCOV_PROFILE := n or GCOV_PROFILE_basename.o := n in the
corresponding Makefile.
+Problem: Files copied from sysfs appear empty or incomplete.
+Cause: Due to the way seq_file works, some tools such as cp or tar
+ may not correctly copy files from sysfs.
+Solution: Use 'cat' to read .gcda files and 'cp -d' to copy links.
+ Alternatively use the mechanism shown in Appendix B.
+
Appendix A: gather_on_build.sh
==============================
Sample script to gather coverage meta files on the build machine
(see 6a):
-
#!/bin/bash
KSRC=$1
@@ -226,7 +231,7 @@ Appendix B: gather_on_test.sh
Sample script to gather coverage data files on the test machine
(see 6b):
-#!/bin/bash
+#!/bin/bash -e
DEST=$1
GCDA=/sys/kernel/debug/gcov
@@ -236,11 +241,13 @@ if [ -z "$DEST" ] ; then
exit 1
fi
-find $GCDA -name '*.gcno' -o -name '*.gcda' | tar cfz $DEST -T -
+TEMPDIR=$(mktemp -d)
+echo Collecting data..
+find $GCDA -type d -exec mkdir -p $TEMPDIR/\{\} \;
+find $GCDA -name '*.gcda' -exec sh -c 'cat < $0 > '$TEMPDIR'/$0' {} \;
+find $GCDA -name '*.gcno' -exec sh -c 'cp -d $0 '$TEMPDIR'/$0' {} \;
+tar czf $DEST -C $TEMPDIR sys
+rm -rf $TEMPDIR
-if [ $? -eq 0 ] ; then
- echo "$DEST successfully created, copy to build system and unpack with:"
- echo " tar xfz $DEST"
-else
- echo "Could not create file $DEST"
-fi
+echo "$DEST successfully created, copy to build system and unpack with:"
+echo " tar xfz $DEST"
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 0112da3b9ab8..89068030b01b 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -16,13 +16,17 @@ Usage
-----
CONFIG_DEBUG_KMEMLEAK in "Kernel hacking" has to be enabled. A kernel
-thread scans the memory every 10 minutes (by default) and prints any new
-unreferenced objects found. To trigger an intermediate scan and display
-all the possible memory leaks:
+thread scans the memory every 10 minutes (by default) and prints the
+number of new unreferenced objects found. To display the details of all
+the possible memory leaks:
# mount -t debugfs nodev /sys/kernel/debug/
# cat /sys/kernel/debug/kmemleak
+To trigger an intermediate memory scan:
+
+ # echo scan > /sys/kernel/debug/kmemleak
+
Note that the orphan objects are listed in the order they were allocated
and one object at the beginning of the list may cause other subsequent
objects to be reported as orphan.
@@ -31,16 +35,21 @@ Memory scanning parameters can be modified at run-time by writing to the
/sys/kernel/debug/kmemleak file. The following parameters are supported:
off - disable kmemleak (irreversible)
- stack=on - enable the task stacks scanning
+ stack=on - enable the task stacks scanning (default)
stack=off - disable the tasks stacks scanning
- scan=on - start the automatic memory scanning thread
+ scan=on - start the automatic memory scanning thread (default)
scan=off - stop the automatic memory scanning thread
- scan=<secs> - set the automatic memory scanning period in seconds (0
- to disable it)
+ scan=<secs> - set the automatic memory scanning period in seconds
+ (default 600, 0 to stop the automatic scanning)
+ scan - trigger a memory scan
Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on
the kernel command line.
+Memory may be allocated or freed before kmemleak is initialised and
+these actions are stored in an early log buffer. The size of this buffer
+is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option.
+
Basic Algorithm
---------------
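A small userspace sketch of the kmemleak usage described above -- trigger an on-demand scan, then dump the report -- assuming debugfs is mounted at /sys/kernel/debug as in the text:

/* Sketch: equivalent of "echo scan > /sys/kernel/debug/kmemleak" followed
 * by "cat /sys/kernel/debug/kmemleak". */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/kmemleak";
	char line[256];
	FILE *f;

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("scan\n", f);		/* trigger an intermediate memory scan */
	fclose(f);

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* orphan objects, oldest allocation first */
	fclose(f);
	return 0;
}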
diff --git a/Documentation/spi/spidev_test.c b/Documentation/spi/spidev_test.c
index cf0e3ce0d526..c1a5aad3c75a 100644
--- a/Documentation/spi/spidev_test.c
+++ b/Documentation/spi/spidev_test.c
@@ -99,11 +99,13 @@ void parse_opts(int argc, char *argv[])
{ "lsb", 0, 0, 'L' },
{ "cs-high", 0, 0, 'C' },
{ "3wire", 0, 0, '3' },
+ { "no-cs", 0, 0, 'N' },
+ { "ready", 0, 0, 'R' },
{ NULL, 0, 0, 0 },
};
int c;
- c = getopt_long(argc, argv, "D:s:d:b:lHOLC3", lopts, NULL);
+ c = getopt_long(argc, argv, "D:s:d:b:lHOLC3NR", lopts, NULL);
if (c == -1)
break;
@@ -139,6 +141,12 @@ void parse_opts(int argc, char *argv[])
case '3':
mode |= SPI_3WIRE;
break;
+ case 'N':
+ mode |= SPI_NO_CS;
+ break;
+ case 'R':
+ mode |= SPI_READY;
+ break;
default:
print_usage(argv[0]);
break;
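The new -N/--no-cs and -R/--ready options simply set the corresponding mode bits before spidev_test issues the mode ioctl. A minimal standalone sketch of that ioctl sequence (the device node is illustrative; kernels without SPI_NO_CS/SPI_READY support reject unknown mode bits):

/* Sketch: request SPI_NO_CS through the spidev mode ioctls. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/spi/spidev.h>

int main(void)
{
	uint8_t mode = SPI_MODE_0 | SPI_NO_CS;
	int fd = open("/dev/spidev1.1", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, SPI_IOC_WR_MODE, &mode) < 0)
		perror("SPI_IOC_WR_MODE");	/* controller may not support it */
	if (ioctl(fd, SPI_IOC_RD_MODE, &mode) < 0)
		perror("SPI_IOC_RD_MODE");
	printf("mode now 0x%02x\n", mode);
	close(fd);
	return 0;
}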
diff --git a/MAINTAINERS b/MAINTAINERS
index 05a9a563042f..381190c7949c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2130,9 +2130,9 @@ F: drivers/edac/i5400_edac.c
EDAC-I82975X
P: Ranganathan Desikan
P: Arvind R.
L: [email protected] (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
S: Maintained
@@ -2851,7 +2851,9 @@ S: Maintained
IA64 (Itanium) PLATFORM
P: Tony Luck
+P: Fenghua Yu
W: http://www.ia64-linux.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6.git
@@ -2929,7 +2931,7 @@ P: Dmitry Eremin-Solenikov
P: Sergey Lapin
+L: [email protected] (moderated for non-subscribers)
W: http://apps.sourceforge.net/trac/linux-zigbee
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
S: Maintained
@@ -5576,8 +5578,8 @@ F: drivers/staging/
STARFIRE/DURALAN NETWORK DRIVER
P: Ion Badulescu
-S: Maintained
+S: Odd Fixes
F: drivers/net/starfire*
STARMODE RADIO IP (STRIP) PROTOCOL DRIVER
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h
index 06c5c7a4afd3..b663f1f10b6a 100644
--- a/arch/alpha/include/asm/percpu.h
+++ b/arch/alpha/include/asm/percpu.h
@@ -30,7 +30,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#ifndef MODULE
#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset))
-#define PER_CPU_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
#else
/*
* To calculate addresses of locally defined variables, GCC uses 32-bit
@@ -49,7 +49,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
: "=&r"(__ptr), "=&r"(tmp_gp)); \
(typeof(&per_cpu_var(var)))(__ptr + (offset)); })
-#define PER_CPU_ATTRIBUTES __used
+#define PER_CPU_DEF_ATTRIBUTES __used
#endif /* MODULE */
@@ -71,7 +71,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define __get_cpu_var(var) per_cpu_var(var)
#define __raw_get_cpu_var(var) per_cpu_var(var)
-#define PER_CPU_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
#endif /* SMP */
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index 96d78d5d2c41..4a8fb427ce0a 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -341,10 +341,12 @@
#define __NR_inotify_init1 332
#define __NR_preadv 333
#define __NR_pwritev 334
+#define __NR_rt_tgsigqueueinfo 335
+#define __NR_perf_counter_open 336
#ifdef __KERNEL__
-#define NR_syscalls 335
+#define NR_syscalls 337
#define __ARCH_WANT_IPC_PARSE_VERSION
/* #define __ARCH_WANT_OLD_READDIR */
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 356e0e327a89..fde1e446b440 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1524,5 +1524,7 @@ sys_call_table:
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev
+ .long sys_rt_tgsigqueueinfo /* 335 */
+ .long sys_perf_counter_open
syscall_table_size = (. - sys_call_table)
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
index ebf4e988e78c..d5764a3d74af 100644
--- a/arch/ia64/kernel/esi.c
+++ b/arch/ia64/kernel/esi.c
@@ -65,7 +65,7 @@ static int __init esi_init (void)
}
if (!esi)
- return -ENODEV;;
+ return -ENODEV;
systab = __va(esi);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index abce2468a40b..f1782705b1f7 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg)
* /proc/perfmon interface, for debug only
*/
-#define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1)
+#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 7053c55b7649..e6676fca4828 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -192,7 +192,7 @@ struct salinfo_platform_oemdata_parms {
static void
salinfo_work_to_do(struct salinfo_data *data)
{
- down_trylock(&data->mutex);
+ (void)(down_trylock(&data->mutex) ?: 0);
up(&data->mutex);
}
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index a8f84da04b49..bb862fb224f2 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -130,7 +130,7 @@ static void collect_interruption(struct kvm_vcpu *vcpu)
if (vdcr & IA64_DCR_PP) {
vpsr |= IA64_PSR_PP;
} else {
- vpsr &= ~IA64_PSR_PP;;
+ vpsr &= ~IA64_PSR_PP;
}
vcpu_set_psr(vcpu, vpsr);
@@ -594,11 +594,11 @@ static void set_pal_call_data(struct kvm_vcpu *vcpu)
p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
break;
case PAL_BRAND_INFO:
- p->u.pal_data.gr29 = gr29;;
+ p->u.pal_data.gr29 = gr29;
p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
break;
default:
- p->u.pal_data.gr29 = gr29;;
+ p->u.pal_data.gr29 = gr29;
p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
}
p->u.pal_data.gr28 = gr28;
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index a2c6c15e4761..46b02cbcc874 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -406,7 +406,7 @@ void getreg(unsigned long regnum, unsigned long *val,
* Now look at registers in [0-31] range and init correct UNAT
*/
addr = (unsigned long)regs;
- unat = &regs->eml_unat;;
+ unat = &regs->eml_unat;
addr += gr_info[regnum];
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 4290a429bf7c..20b3852f7a6e 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -135,7 +135,7 @@ struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
u64 rid;
rid = vcpu_get_rr(vcpu, va);
- rid = rid & RR_RID_MASK;;
+ rid = rid & RR_RID_MASK;
if (type == D_TLB) {
if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
@@ -518,7 +518,7 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
struct thash_cb *hcb = &v->arch.vtlb;
- cch = __vtr_lookup(v, va, is_data);;
+ cch = __vtr_lookup(v, va, is_data);
if (cch)
return cch;
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 76645cf6ac5d..25831c47c579 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -435,7 +435,8 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
bricktype = MODULE_GET_BTYPE(moduleid);
if ((bricktype == L1_BRICKTYPE_191010) ||
(bricktype == L1_BRICKTYPE_1932))
- sprintf(address, "%s^%d", address, geo_slot(geoid));
+ sprintf(address + strlen(address), "^%d",
+ geo_slot(geoid));
}
void __devinit
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index fef5b434dadc..fad68616af32 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -346,10 +346,12 @@
#define __NR_inotify_init1 333
#define __NR_preadv 334
#define __NR_pwritev 335
+#define __NR_rt_tgsigqueueinfo 336
+#define __NR_perf_counter_open 337
#ifdef __KERNEL__
-#define NR_syscalls 326
+#define NR_syscalls 338
/*
* specify the deprecated syscalls we want to support on this arch
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index 7408a27199f3..e0d2563af4f2 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -722,6 +722,8 @@ ENTRY(sys_call_table)
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev /* 335 */
+ .long sys_rt_tgsigqueueinfo
+ .long sys_perf_counter_open
nr_syscalls=(.-sys_call_table)/4
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index 8ccd4e155768..0ea0639fcf75 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -61,6 +61,8 @@ struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+#define PERF_COUNTER_INDEX_OFFSET 1
+
/*
* Only override the default definitions in include/linux/perf_counter.h
* if we have hardware PMU support.
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index 5fb33e160ea0..fa64e401589d 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -87,6 +87,9 @@ union cpuid10_edx {
#ifdef CONFIG_PERF_COUNTERS
extern void init_hw_perf_counters(void);
extern void perf_counters_lapic_init(void);
+
+#define PERF_COUNTER_INDEX_OFFSET 0
+
#else
static inline void init_hw_perf_counters(void) { }
static inline void perf_counters_lapic_init(void) { }
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 76dfef23f789..d4cf4ce19aac 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -401,7 +401,7 @@ static const u64 amd_hw_cache_event_ids
[ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
},
[ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0042, /* Data Cache Refills from L2 */
+ [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
@@ -912,6 +912,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
err = checking_wrmsrl(hwc->counter_base + idx,
(u64)(-left) & x86_pmu.counter_mask);
+ perf_counter_update_userpage(counter);
+
return ret;
}
@@ -969,13 +971,6 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
if (!x86_pmu.num_counters_fixed)
return -1;
- /*
- * Quirk, IA32_FIXED_CTRs do not work on current Atom processors:
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86_model == 28)
- return -1;
-
event = hwc->config & ARCH_PERFMON_EVENT_MASK;
if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
@@ -1041,6 +1036,8 @@ try_generic:
x86_perf_counter_set_period(counter, hwc, idx);
x86_pmu.enable(hwc, idx);
+ perf_counter_update_userpage(counter);
+
return 0;
}
@@ -1133,6 +1130,8 @@ static void x86_pmu_disable(struct perf_counter *counter)
x86_perf_counter_update(counter, hwc, idx);
cpuc->counters[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
+
+ perf_counter_update_userpage(counter);
}
/*
@@ -1428,8 +1427,6 @@ static int intel_pmu_init(void)
*/
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
-
/*
* Install the hw-cache-events table:
*/
@@ -1499,21 +1496,22 @@ void __init init_hw_perf_counters(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
- x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+ x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
}
perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
perf_max_counters = x86_pmu.num_counters;
if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
- x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+ x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
}
perf_counter_mask |=
((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+ x86_pmu.intel_ctrl = perf_counter_mask;
perf_counters_lapic_init();
register_die_notifier(&perf_counter_nmi_notifier);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index c4378f4fd4a5..b177652251a4 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -598,6 +598,8 @@ void __init paging_init(void)
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
+ /* clear the default setting with node 0 */
+ nodes_clear(node_states[N_NORMAL_MEMORY]);
free_area_init_nodes(max_zone_pfns);
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 862b40c90181..91b753013780 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3327,7 +3327,10 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mutex_lock(&open_lock);
- LOCK_FDC(drive, 1);
+ if (lock_fdc(drive, 1)) {
+ mutex_unlock(&open_lock);
+ return -EINTR;
+ }
floppy_type[type] = *g;
floppy_type[type].name = "user format";
for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 3493c6bdb820..871c13b4c148 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -150,6 +150,8 @@ enum mem_type {
MEM_FB_DDR2, /* fully buffered DDR2 */
MEM_RDDR2, /* Registered DDR2 RAM */
MEM_XDR, /* Rambus XDR */
+ MEM_DDR3, /* DDR3 RAM */
+ MEM_RDDR3, /* Registered DDR3 RAM */
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -167,6 +169,8 @@ enum mem_type {
#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
/* chipset Error Detection and Correction capabilities and mode */
enum edac_type {
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index ad218fe4942d..e1d4ce083481 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -94,7 +94,9 @@ static const char *mem_types[] = {
[MEM_DDR2] = "Unbuffered-DDR2",
[MEM_FB_DDR2] = "FullyBuffered-DDR2",
[MEM_RDDR2] = "Registered-DDR2",
- [MEM_XDR] = "XDR"
+ [MEM_XDR] = "XDR",
+ [MEM_DDR3] = "Unbuffered-DDR3",
+ [MEM_RDDR3] = "Registered-DDR3"
};
static const char *dev_types[] = {
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 7c8c2d72916f..3f2ccfc6407c 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -757,6 +757,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
case DSC_SDTYPE_DDR2:
mtype = MEM_RDDR2;
break;
+ case DSC_SDTYPE_DDR3:
+ mtype = MEM_RDDR3;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
@@ -769,6 +772,9 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
case DSC_SDTYPE_DDR2:
mtype = MEM_DDR2;
break;
+ case DSC_SDTYPE_DDR3:
+ mtype = MEM_DDR3;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 135b3539a030..52432ee7c4b9 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -53,6 +53,7 @@
#define DSC_SDTYPE_DDR 0x02000000
#define DSC_SDTYPE_DDR2 0x03000000
+#define DSC_SDTYPE_DDR3 0x07000000
#define DSC_X32_EN 0x00000020
/* Err_Int_En */
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index aa8e7cb020d9..4ee4c8367a3f 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -109,6 +109,16 @@ static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
writeb(!!value << offset, chip->base + (1 << (offset + 2)));
}
+static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
+
+ if (chip->irq_base == (unsigned) -1)
+ return -EINVAL;
+
+ return chip->irq_base + offset;
+}
+
/*
* PL061 GPIO IRQ
*/
@@ -200,7 +210,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
desc->chip->ack(irq);
list_for_each(ptr, chip_list) {
unsigned long pending;
- int gpio;
+ int offset;
chip = list_entry(ptr, struct pl061_gpio, list);
pending = readb(chip->base + GPIOMIS);
@@ -209,8 +219,8 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
if (pending == 0)
continue;
- for_each_bit(gpio, &pending, PL061_GPIO_NR)
- generic_handle_irq(gpio_to_irq(gpio));
+ for_each_bit(offset, &pending, PL061_GPIO_NR)
+ generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
desc->chip->unmask(irq);
}
@@ -221,7 +231,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
struct pl061_gpio *chip;
struct list_head *chip_list;
int ret, irq, i;
- static unsigned long init_irq[BITS_TO_LONGS(NR_IRQS)];
+ static DECLARE_BITMAP(init_irq, NR_IRQS);
pdata = dev->dev.platform_data;
if (pdata == NULL)
@@ -251,6 +261,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
chip->gc.direction_output = pl061_direction_output;
chip->gc.get = pl061_get_value;
chip->gc.set = pl061_set_value;
+ chip->gc.to_irq = pl061_to_irq;
chip->gc.base = pdata->gpio_base;
chip->gc.ngpio = PL061_GPIO_NR;
chip->gc.label = dev_name(&dev->dev);
@@ -280,6 +291,7 @@ static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
if (chip_list == NULL) {
+ clear_bit(irq, init_irq);
ret = -ENOMEM;
goto iounmap;
}
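With the new .to_irq hook wired up, a consumer can resolve a PL061 line to its interrupt through plain gpiolib calls. A hedged sketch of that consumer side (the GPIO number, labels and handler are illustrative, not from the patch):

/* Sketch: request a PL061 GPIO as an interrupt source via gpio_to_irq(),
 * which now resolves through pl061_to_irq(). */
#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_attach(unsigned gpio)
{
	int irq, err;

	err = gpio_request(gpio, "pl061-example");
	if (err)
		return err;
	err = gpio_direction_input(gpio);
	if (err)
		goto free;

	irq = gpio_to_irq(gpio);	/* -EINVAL if no irq_base was provided */
	if (irq < 0) {
		err = irq;
		goto free;
	}

	err = request_irq(irq, example_isr, IRQF_TRIGGER_RISING,
			  "pl061-example", NULL);
	if (err)
		goto free;
	return 0;
free:
	gpio_free(gpio);
	return err;
}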
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index d4e8979735cb..9c3138265f8e 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -82,7 +82,7 @@ struct lg_cpu {
struct lg_eventfd {
unsigned long addr;
- struct file *event;
+ struct eventfd_ctx *event;
};
struct lg_eventfd_map {
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 32e297121058..9f9a2953b383 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -50,7 +50,7 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
/* Now append new entry. */
new->map[new->num].addr = addr;
- new->map[new->num].event = eventfd_fget(fd);
+ new->map[new->num].event = eventfd_ctx_fdget(fd);
if (IS_ERR(new->map[new->num].event)) {
kfree(new);
return PTR_ERR(new->map[new->num].event);
@@ -357,7 +357,7 @@ static int close(struct inode *inode, struct file *file)
/* Release any eventfds they registered. */
for (i = 0; i < lg->eventfds->num; i++)
- fput(lg->eventfds->map[i].event);
+ eventfd_ctx_put(lg->eventfds->map[i].event);
kfree(lg->eventfds);
/* If lg->dead doesn't contain an error code it will be NULL or a
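The lguest conversion above is the general eventfd_ctx pattern introduced by this series: resolve the context once with eventfd_ctx_fdget(), signal it from kernel context, and drop it with eventfd_ctx_put() instead of holding a struct file. A sketch under those assumptions (struct my_notifier and the function names are illustrative):

/* Sketch of the eventfd_ctx lifecycle: fdget at registration, signal on
 * events, put on teardown. */
#include <linux/eventfd.h>
#include <linux/err.h>

struct my_notifier {
	struct eventfd_ctx *ctx;
};

static int my_register(struct my_notifier *n, int fd)
{
	n->ctx = eventfd_ctx_fdget(fd);		/* no long-lived struct file */
	if (IS_ERR(n->ctx))
		return PTR_ERR(n->ctx);
	return 0;
}

static void my_notify(struct my_notifier *n)
{
	eventfd_signal(n->ctx, 1);		/* wakes userspace readers */
}

static void my_unregister(struct my_notifier *n)
{
	eventfd_ctx_put(n->ctx);		/* was fput() on the old file */
}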
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index c3ae51584b12..3710ff88fc10 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -195,7 +195,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_exception_store **store)
{
int r = 0;
- struct dm_exception_store_type *type;
+ struct dm_exception_store_type *type = NULL;
struct dm_exception_store *tmp_store;
char persistent;
@@ -211,12 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
}
persistent = toupper(*argv[1]);
- if (persistent != 'P' && persistent != 'N') {
+ if (persistent == 'P')
+ type = get_type("P");
+ else if (persistent == 'N')
+ type = get_type("N");
+ else {
ti->error = "Persistent flag is not P or N";
return -EINVAL;
}
- type = get_type(&persistent);
if (!type) {
ti->error = "Exception store type not recognised";
r = -EINVAL;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4899ebe767c8..2cba557d9e61 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -495,7 +495,7 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
- if (blk_stack_limits(limits, &q->limits, start) < 0)
+ if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
DMWARN("%s: target device %s is misaligned",
dm_device_name(ti->table->md), bdevname(bdev, b));
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 240608cc7ae9..a461017ce5ce 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1313,6 +1313,12 @@ static int mmc_spi_probe(struct spi_device *spi)
struct mmc_spi_host *host;
int status;
+ /* We rely on full duplex transfers, mostly to reduce
+ * per-transfer overheads (by making fewer transfers).
+ */
+ if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ return -EINVAL;
+
/* MMC and SD specs only seem to care that sampling is on the
* rising edge ... meaning SPI modes 0 or 3. So either SPI mode
* should be legit. We'll use mode 0 since the steady state is 0,
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fbf1352e9c1c..951714a7f90a 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -8637,6 +8637,14 @@ static int bnx2x_nway_reset(struct net_device *dev)
return 0;
}
+static u32
+bnx2x_get_link(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return bp->link_vars.link_up;
+}
+
static int bnx2x_get_eeprom_len(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -10034,7 +10042,7 @@ static struct ethtool_ops bnx2x_ethtool_ops = {
.get_msglevel = bnx2x_get_msglevel,
.set_msglevel = bnx2x_set_msglevel,
.nway_reset = bnx2x_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = bnx2x_get_link,
.get_eeprom_len = bnx2x_get_eeprom_len,
.get_eeprom = bnx2x_get_eeprom,
.set_eeprom = bnx2x_set_eeprom,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5e3356f8eb5a..5b8cbdb4b520 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2185,12 +2185,16 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->skb) {
+ if (buffer_info->dma) {
pci_unmap_single(pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ }
+
+ buffer_info->dma = 0;
+ if (buffer_info->skb) {
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
@@ -4033,6 +4037,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
buffer_info->dma,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
@@ -4222,6 +4227,7 @@ map_skb:
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
break; /* while !buffer_info->skb */
}
@@ -4817,6 +4823,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
e1000_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 679885a122b4..63415bb6f48f 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4785,6 +4785,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
e1000e_down(adapter);
pci_disable_device(pdev);
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ea17319624aa..be480292aba1 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4549,11 +4549,12 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
cleaned = true;
cleaned_count++;
+ /* this is the fast path for the non-packet split case */
if (!adapter->rx_ps_hdr_size) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_buffer_len +
- NET_IP_ALIGN,
+ adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
skb_put(skb, length);
goto send_up;
}
@@ -4570,8 +4571,9 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
if (!skb_shinfo(skb)->nr_frags) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_ps_hdr_size + NET_IP_ALIGN,
+ adapter->rx_ps_hdr_size,
PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
skb_put(skb, hlen);
}
@@ -4713,7 +4715,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
bufsz = adapter->rx_ps_hdr_size;
else
bufsz = adapter->rx_buffer_len;
- bufsz += NET_IP_ALIGN;
while (cleaned_count--) {
rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
@@ -4737,7 +4738,7 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
}
if (!buffer_info->skb) {
- skb = netdev_alloc_skb(netdev, bufsz);
+ skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
@@ -5338,6 +5339,9 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
igb_down(adapter);
pci_disable_device(pdev);
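e1000, e1000e and igb all gain the same early return in their io_error_detected handlers: a permanently failed channel cannot be recovered, so report DISCONNECT before touching the device. The shared pattern in isolation, with the driver-specific pieces stubbed out (my_io_error_detected and my_down are illustrative names):

/* Sketch of the common PCI error-recovery fix in these three drivers. */
#include <linux/pci.h>
#include <linux/netdevice.h>

static void my_down(struct net_device *netdev)
{
	/* driver-specific teardown (e1000_down()/igb_down()/...) goes here */
}

static pci_ers_result_t my_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_detach(netdev);

	/* a permanently failed channel cannot be re-enabled */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		my_down(netdev);
	pci_disable_device(pdev);

	/* ask the PCI core for a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}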
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index f3eed6a8fba5..911c082cee5a 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -677,6 +677,14 @@ static int bfin_sir_init_iobuf(iobuff_t *io, int size)
return 0;
}
+static const struct net_device_ops bfin_sir_ndo = {
+ .ndo_open = bfin_sir_open,
+ .ndo_stop = bfin_sir_stop,
+ .ndo_start_xmit = bfin_sir_hard_xmit,
+ .ndo_do_ioctl = bfin_sir_ioctl,
+ .ndo_get_stats = bfin_sir_stats,
+};
+
static int __devinit bfin_sir_probe(struct platform_device *pdev)
{
struct net_device *dev;
@@ -718,12 +726,8 @@ static int __devinit bfin_sir_probe(struct platform_device *pdev)
if (err)
goto err_mem_3;
- dev->hard_start_xmit = bfin_sir_hard_xmit;
- dev->open = bfin_sir_open;
- dev->stop = bfin_sir_stop;
- dev->do_ioctl = bfin_sir_ioctl;
- dev->get_stats = bfin_sir_stats;
- dev->irq = sir_port->irq;
+ dev->netdev_ops = &bfin_sir_ndo;
+ dev->irq = sir_port->irq;
irda_init_max_qos_capabilies(&self->qos);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 86f4f3e36f27..0f7b6a3a2e68 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -139,7 +139,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->mac.type == ixgbe_mac_82599EB)) {
+ (hw->phy.multispeed_fiber)) {
ecmd->supported |= (SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg);
@@ -217,7 +217,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
s32 err = 0;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->mac.type == ixgbe_mac_82599EB)) {
+ (hw->phy.multispeed_fiber)) {
/* 10000/copper and 1000/copper must autoneg
* this function does not support any duplex forcing, but can
* limit the advertising of the adapter to only 10000 or 1000 */
@@ -245,6 +245,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
} else {
/* in this case we currently only support 10Gb/FULL */
if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+ (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
(ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
}
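With the tightened check, a forced-10G port only accepts a request with autoneg disabled, exactly ADVERTISED_10000baseT_Full advertised, and speed/duplex of 10000/full. A hedged userspace sketch of such a request through the ethtool ioctl (the interface name is illustrative):

/* Sketch: the only set_settings request the 10G-only branch now accepts. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_cmd ecmd;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth2", IFNAMSIZ - 1);

	memset(&ecmd, 0, sizeof(ecmd));
	ecmd.cmd = ETHTOOL_SSET;
	ecmd.autoneg = AUTONEG_DISABLE;
	ecmd.advertising = ADVERTISED_10000baseT_Full;
	ecmd.speed = SPEED_10000;
	ecmd.duplex = DUPLEX_FULL;

	ifr.ifr_data = (void *)&ecmd;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SSET");
	return 0;
}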
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e756e220db32..5588ef493a3d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -563,7 +563,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
@@ -593,7 +592,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
if (!bi->skb) {
struct sk_buff *skb;
- skb = netdev_alloc_skb(adapter->netdev, bufsz);
+ skb = netdev_alloc_skb(adapter->netdev,
+ (rx_ring->rx_buf_len +
+ NET_IP_ALIGN));
if (!skb) {
adapter->alloc_rx_buff_failed++;
@@ -608,7 +609,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
skb_reserve(skb, NET_IP_ALIGN);
bi->skb = skb;
- bi->dma = pci_map_single(pdev, skb->data, bufsz,
+ bi->dma = pci_map_single(pdev, skb->data,
+ rx_ring->rx_buf_len,
PCI_DMA_FROMDEVICE);
}
/* Refresh the desc even if buffer_addrs didn't change because
@@ -732,6 +734,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
pci_unmap_single(pdev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
skb_put(skb, len);
}
@@ -2701,7 +2704,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
*/
err = hw->phy.ops.identify(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
ixgbe_down(adapter);
return err;
}
@@ -2812,9 +2818,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
}
if (!rx_buffer_info->page)
continue;
- pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
- PCI_DMA_FROMDEVICE);
- rx_buffer_info->page_dma = 0;
+ if (rx_buffer_info->page_dma) {
+ pci_unmap_page(pdev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ }
put_page(rx_buffer_info->page);
rx_buffer_info->page = NULL;
rx_buffer_info->page_offset = 0;
@@ -3720,10 +3728,11 @@ static void ixgbe_sfp_task(struct work_struct *work)
goto reschedule;
ret = hw->phy.ops.reset(hw);
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "failed to initialize because an "
- "unsupported SFP+ module type was detected.\n"
- "Reload the driver after installing a "
- "supported module.\n");
+ dev_err(&adapter->pdev->dev, "failed to initialize "
+ "because an unsupported SFP+ module type "
+ "was detected.\n"
+ "Reload the driver after installing a "
+ "supported module.\n");
unregister_netdev(adapter->netdev);
} else {
DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
@@ -4502,7 +4511,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
u32 autoneg;
adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
- if (hw->mac.ops.get_link_capabilities)
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
hw->mac.ops.get_link_capabilities(hw, &autoneg,
&hw->mac.autoneg);
if (hw->mac.ops.setup_link_speed)
@@ -4526,7 +4536,10 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
err = hw->phy.ops.identify_sfp(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
ixgbe_down(adapter);
return;
}
@@ -5513,8 +5526,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
round_jiffies(jiffies + (2 * HZ)));
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- dev_err(&adapter->pdev->dev, "failed to load because an "
- "unsupported SFP+ module type was detected.\n");
+ dev_err(&adapter->pdev->dev, "failed to initialize because "
+ "an unsupported SFP+ module type was detected.\n"
+ "Reload the driver after installing a supported "
+ "module.\n");
goto err_sw_init;
} else if (err) {
dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 80e01778dd3b..cd35d50e46d4 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -319,7 +319,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return crc == crc2;
if (unlikely(crc != crc2)) {
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
dev_kfree_skb_any(skb2);
} else
usbnet_skb_return(dev, skb2);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 7ae82446b93a..1d3730d6690f 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -513,11 +513,11 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
if (unlikely(status & 0xbf)) {
- if (status & 0x01) dev->stats.rx_fifo_errors++;
- if (status & 0x02) dev->stats.rx_crc_errors++;
- if (status & 0x04) dev->stats.rx_frame_errors++;
- if (status & 0x20) dev->stats.rx_missed_errors++;
- if (status & 0x90) dev->stats.rx_length_errors++;
+ if (status & 0x01) dev->net->stats.rx_fifo_errors++;
+ if (status & 0x02) dev->net->stats.rx_crc_errors++;
+ if (status & 0x04) dev->net->stats.rx_frame_errors++;
+ if (status & 0x20) dev->net->stats.rx_missed_errors++;
+ if (status & 0x90) dev->net->stats.rx_length_errors++;
return 0;
}
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 034e8a73ca6b..aeb1ab03a9ee 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -433,7 +433,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
dbg("rx framesize %d range %d..%d mtu %d", skb->len,
net->hard_header_len, dev->hard_mtu, net->mtu);
#endif
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
nc_ensure_sync(dev);
return 0;
}
@@ -442,12 +442,12 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
hdr_len = le16_to_cpup(&header->hdr_len);
packet_len = le16_to_cpup(&header->packet_len);
if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("packet too big, %d", packet_len);
nc_ensure_sync(dev);
return 0;
} else if (hdr_len < MIN_HEADER) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("header too short, %d", hdr_len);
nc_ensure_sync(dev);
return 0;
@@ -465,21 +465,21 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if ((packet_len & 0x01) == 0) {
if (skb->data [packet_len] != PAD_BYTE) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("bad pad");
return 0;
}
skb_trim(skb, skb->len - 1);
}
if (skb->len != packet_len) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
dbg("bad packet len %d (expected %d)",
skb->len, packet_len);
nc_ensure_sync(dev);
return 0;
}
if (header->packet_id != get_unaligned(&trailer->packet_id)) {
- dev->stats.rx_fifo_errors++;
+ dev->net->stats.rx_fifo_errors++;
dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
le16_to_cpu(header->packet_id),
le16_to_cpu(trailer->packet_id));
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 1bf243ef950e..2232232b7989 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -487,7 +487,7 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET
|| skb->len < msg_len
|| (data_offset + data_len + 8) > msg_len)) {
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
le32_to_cpu(hdr->msg_type),
msg_len, data_offset, data_len, skb->len);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 89a91f8c22de..fe045896406b 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1108,18 +1108,18 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (unlikely(header & RX_STS_ES_)) {
if (netif_msg_rx_err(dev))
devdbg(dev, "Error header=0x%08x", header);
- dev->stats.rx_errors++;
- dev->stats.rx_dropped++;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_dropped++;
if (header & RX_STS_CRC_) {
- dev->stats.rx_crc_errors++;
+ dev->net->stats.rx_crc_errors++;
} else {
if (header & (RX_STS_TL_ | RX_STS_RF_))
- dev->stats.rx_frame_errors++;
+ dev->net->stats.rx_frame_errors++;
if ((header & RX_STS_LE_) &&
(!(header & RX_STS_FT_)))
- dev->stats.rx_length_errors++;
+ dev->net->stats.rx_length_errors++;
}
} else {
/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 22c0585a0319..edfd9e10ceba 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -234,8 +234,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
int status;
skb->protocol = eth_type_trans (skb, dev->net);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb->len;
if (netif_msg_rx_status (dev))
devdbg (dev, "< rx, len %zu, type 0x%x",
@@ -397,7 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
if (netif_msg_rx_err (dev))
devdbg (dev, "drop");
error:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
skb_queue_tail (&dev->done, skb);
}
}
@@ -420,8 +420,8 @@ static void rx_complete (struct urb *urb)
case 0:
if (skb->len < dev->net->hard_header_len) {
entry->state = rx_cleanup;
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
if (netif_msg_rx_err (dev))
devdbg (dev, "rx length %d", skb->len);
}
@@ -433,7 +433,7 @@ static void rx_complete (struct urb *urb)
* storm, recovering as needed.
*/
case -EPIPE:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
usbnet_defer_kevent (dev, EVENT_RX_HALT);
// FALLTHROUGH
@@ -451,7 +451,7 @@ static void rx_complete (struct urb *urb)
case -EPROTO:
case -ETIME:
case -EILSEQ:
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
if (netif_msg_link (dev))
@@ -465,12 +465,12 @@ block:
/* data overrun ... flush fifo? */
case -EOVERFLOW:
- dev->stats.rx_over_errors++;
+ dev->net->stats.rx_over_errors++;
// FALLTHROUGH
default:
entry->state = rx_cleanup;
- dev->stats.rx_errors++;
+ dev->net->stats.rx_errors++;
if (netif_msg_rx_err (dev))
devdbg (dev, "rx status %d", urb_status);
break;
@@ -583,8 +583,8 @@ int usbnet_stop (struct net_device *net)
if (netif_msg_ifdown (dev))
devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
- dev->stats.rx_packets, dev->stats.tx_packets,
- dev->stats.rx_errors, dev->stats.tx_errors
+ net->stats.rx_packets, net->stats.tx_packets,
+ net->stats.rx_errors, net->stats.tx_errors
);
// ensure there are no more active urbs
@@ -891,10 +891,10 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += entry->length;
+ dev->net->stats.tx_packets++;
+ dev->net->stats.tx_bytes += entry->length;
} else {
- dev->stats.tx_errors++;
+ dev->net->stats.tx_errors++;
switch (urb->status) {
case -EPIPE:
@@ -1020,7 +1020,7 @@ int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
devdbg (dev, "drop, code %d", retval);
drop:
retval = NET_XMIT_SUCCESS;
- dev->stats.tx_dropped++;
+ dev->net->stats.tx_dropped++;
if (skb)
dev_kfree_skb_any (skb);
usb_free_urb (urb);
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 1032d5fdbd42..2597145a066e 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2907,6 +2907,7 @@ enum parport_pc_pci_cards {
netmos_9755,
netmos_9805,
netmos_9815,
+ netmos_9901,
quatech_sppxp100,
};
@@ -2987,7 +2988,7 @@ static struct parport_pc_pci {
/* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
/* netmos_9805 */ { 1, { { 0, -1 }, } },
/* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
-
+ /* netmos_9901 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
@@ -3089,6 +3090,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x2000, 0, 0, netmos_9901 },
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index aafd3e6ebb0d..a118eb0f1e67 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -1,8 +1,8 @@
/*
* Blackfin On-Chip Real Time Clock Driver
- * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789]
+ * Supports BF51x/BF52x/BF53[123]/BF53[467]/BF54x
*
- * Copyright 2004-2008 Analog Devices Inc.
+ * Copyright 2004-2009 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -363,7 +363,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
struct bfin_rtc *rtc;
struct device *dev = &pdev->dev;
int ret = 0;
- unsigned long timeout;
+ unsigned long timeout = jiffies + HZ;
dev_dbg_stamp(dev);
@@ -374,32 +374,32 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
device_init_wakeup(dev, 1);
+ /* Register our RTC with the RTC framework */
+ rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops,
+ THIS_MODULE);
+ if (unlikely(IS_ERR(rtc->rtc_dev))) {
+ ret = PTR_ERR(rtc->rtc_dev);
+ goto err;
+ }
+
/* Grab the IRQ and init the hardware */
ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
- goto err;
+ goto err_reg;
/* sometimes the bootloader touched things, but the write complete was not
* enabled, so let's just do a quick timeout here since the IRQ will not fire ...
*/
- timeout = jiffies + HZ;
while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING)
if (time_after(jiffies, timeout))
break;
bfin_rtc_reset(dev, RTC_ISTAT_WRITE_COMPLETE);
bfin_write_RTC_SWCNT(0);
- /* Register our RTC with the RTC framework */
- rtc->rtc_dev = rtc_device_register(pdev->name, dev, &bfin_rtc_ops, THIS_MODULE);
- if (unlikely(IS_ERR(rtc->rtc_dev))) {
- ret = PTR_ERR(rtc->rtc_dev);
- goto err_irq;
- }
-
return 0;
- err_irq:
- free_irq(IRQ_RTC, dev);
- err:
+err_reg:
+ rtc_device_unregister(rtc->rtc_dev);
+err:
kfree(rtc);
return ret;
}
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index a07015d646dd..6160e03f410c 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -759,6 +759,8 @@ static int pci_netmos_init(struct pci_dev *dev)
/* subdevice 0x00PS means <P> parallel, <S> serial */
unsigned int num_serial = dev->subsystem_device & 0xf;
+ if (dev->device == PCI_DEVICE_ID_NETMOS_9901)
+ return 0;
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return 0;
@@ -3557,6 +3559,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_VENDOR_ID_IBM, 0x0299,
0, 0, pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
/*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index aa90ddb37066..8980a5640bd9 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -514,6 +514,8 @@ static int __init uwire_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+
master->bus_num = 2; /* "official" */
master->num_chipselect = 4;
master->setup = uwire_setup;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 2a5abc08e857..f1db395dd889 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -258,6 +258,11 @@ static void bitbang_work(struct work_struct *work)
struct spi_bitbang *bitbang =
container_of(work, struct spi_bitbang, work);
unsigned long flags;
+ int do_setup = -1;
+ int (*setup_transfer)(struct spi_device *,
+ struct spi_transfer *);
+
+ setup_transfer = bitbang->setup_transfer;
spin_lock_irqsave(&bitbang->lock, flags);
bitbang->busy = 1;
@@ -269,8 +274,6 @@ static void bitbang_work(struct work_struct *work)
unsigned tmp;
unsigned cs_change;
int status;
- int (*setup_transfer)(struct spi_device *,
- struct spi_transfer *);
m = container_of(bitbang->queue.next, struct spi_message,
queue);
@@ -287,19 +290,19 @@ static void bitbang_work(struct work_struct *work)
tmp = 0;
cs_change = 1;
status = 0;
- setup_transfer = NULL;
list_for_each_entry (t, &m->transfers, transfer_list) {
- /* override or restore speed and wordsize */
- if (t->speed_hz || t->bits_per_word) {
- setup_transfer = bitbang->setup_transfer;
+ /* override speed or wordsize? */
+ if (t->speed_hz || t->bits_per_word)
+ do_setup = 1;
+
+ /* init (-1) or override (1) transfer params */
+ if (do_setup != 0) {
if (!setup_transfer) {
status = -ENOPROTOOPT;
break;
}
- }
- if (setup_transfer) {
status = setup_transfer(spi, t);
if (status < 0)
break;
@@ -363,9 +366,10 @@ static void bitbang_work(struct work_struct *work)
m->status = status;
m->complete(m->context);
- /* restore speed and wordsize */
- if (setup_transfer)
+ /* restore speed and wordsize if it was overridden */
+ if (do_setup == 1)
setup_transfer(spi, NULL);
+ do_setup = 0;
/* normally deactivate chipselect ... unless no error and
* cs_change has hinted that the next message will probably
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5d869c4d3eb2..606e7a40a8da 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -58,15 +58,20 @@ static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG];
/* Bit masks for spi_device.mode management. Note that incorrect
- * settings for CS_HIGH and 3WIRE can cause *lots* of trouble for other
- * devices on a shared bus: CS_HIGH, because this device will be
- * active when it shouldn't be; 3WIRE, because when active it won't
- * behave as it should.
+ * settings for some settings can cause *lots* of trouble for other
+ * devices on a shared bus:
*
- * REVISIT should changing those two modes be privileged?
+ * - CS_HIGH ... this device will be active when it shouldn't be
+ * - 3WIRE ... when active, it won't behave as it should
+ * - NO_CS ... there will be no explicit message boundaries; this
+ * is completely incompatible with the shared bus model
+ * - READY ... transfers may proceed when they shouldn't.
+ *
+ * REVISIT should changing those flags be privileged?
*/
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
- | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP)
+ | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
+ | SPI_NO_CS | SPI_READY)
struct spidev_data {
dev_t devt;
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 018850c116c6..497ff8af03ed 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2414,7 +2414,10 @@ static int atafb_get_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
if (err)
return err;
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- return fbhw->encode_fix(fix, &par);
+ mutex_lock(&info->mm_lock);
+ err = fbhw->encode_fix(fix, &par);
+ mutex_unlock(&info->mm_lock);
+ return err;
}
static int atafb_get_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2743,7 +2746,9 @@ static int atafb_set_par(struct fb_info *info)
/* Decode wanted screen parameters */
fbhw->decode_var(&info->var, par);
+ mutex_lock(&info->mm_lock);
fbhw->encode_fix(&info->fix, par);
+ mutex_unlock(&info->mm_lock);
/* Set new videomode */
ata_set_par(par);
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 5afd64482f55..cb88394ba995 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -270,7 +270,9 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
smem_len = (var->xres_virtual * var->yres_virtual
* ((var->bits_per_pixel + 7) / 8));
+ mutex_lock(&info->mm_lock);
info->fix.smem_len = max(smem_len, sinfo->smem_len);
+ mutex_unlock(&info->mm_lock);
info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len,
(dma_addr_t *)&info->fix.smem_start, GFP_KERNEL);
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h
index 7691e73823d3..1f39a62f899b 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/aty/atyfb.h
@@ -187,6 +187,8 @@ struct atyfb_par {
int mtrr_reg;
#endif
u32 mem_cntl;
+ struct crtc saved_crtc;
+ union aty_pll saved_pll;
};
/*
@@ -217,6 +219,7 @@ struct atyfb_par {
#define M64F_XL_DLL 0x00080000
#define M64F_MFB_FORCE_4 0x00100000
#define M64F_HW_TRIPLE 0x00200000
+#define M64F_XL_MEM 0x00400000
/*
* Register access
*/
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 1207c208a30b..63d3739d43a8 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -66,6 +66,8 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/backlight.h>
+#include <linux/reboot.h>
+#include <linux/dmi.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -249,8 +251,6 @@ static int aty_init(struct fb_info *info);
static int store_video_par(char *videopar, unsigned char m64_num);
#endif
-static struct crtc saved_crtc;
-static union aty_pll saved_pll;
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -261,6 +261,8 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
static int read_aty_sense(const struct atyfb_par *par);
#endif
+static DEFINE_MUTEX(reboot_lock);
+static struct fb_info *reboot_info;
/*
* Interface used by the world
@@ -361,8 +363,8 @@ static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
#define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
#define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
-#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4)
-#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_MOBIL_BUS)
+#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM)
+#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM | M64F_MOBIL_BUS)
static struct {
u16 pci_id;
@@ -539,6 +541,7 @@ static char ram_edo[] __devinitdata = "EDO";
static char ram_sdram[] __devinitdata = "SDRAM (1:1)";
static char ram_sgram[] __devinitdata = "SGRAM (1:1)";
static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)";
+static char ram_wram[] __devinitdata = "WRAM";
static char ram_off[] __devinitdata = "OFF";
#endif /* CONFIG_FB_ATY_CT */
@@ -553,6 +556,10 @@ static char *aty_gx_ram[8] __devinitdata = {
#ifdef CONFIG_FB_ATY_CT
static char *aty_ct_ram[8] __devinitdata = {
ram_off, ram_dram, ram_edo, ram_edo,
+ ram_sdram, ram_sgram, ram_wram, ram_resv
+};
+static char *aty_xl_ram[8] __devinitdata = {
+ ram_off, ram_dram, ram_edo, ram_edo,
ram_sdram, ram_sgram, ram_sdram32, ram_resv
};
#endif /* CONFIG_FB_ATY_CT */
@@ -760,6 +767,17 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
+static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp)
+{
+ u32 line_length = vxres * bpp / 8;
+
+ if (par->ram_type == SGRAM ||
+ (!M64_HAS(XL_MEM) && par->ram_type == WRAM))
+ line_length = (line_length + 63) & ~63;
+
+ return line_length;
+}
+
static int aty_var_to_crtc(const struct fb_info *info,
const struct fb_var_screeninfo *var, struct crtc *crtc)
{
@@ -769,13 +787,14 @@ static int aty_var_to_crtc(const struct fb_info *info,
u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol;
u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync;
u32 pix_width, dp_pix_width, dp_chain_mask;
+ u32 line_length;
/* input */
- xres = var->xres;
+ xres = (var->xres + 7) & ~7;
yres = var->yres;
- vxres = var->xres_virtual;
+ vxres = (var->xres_virtual + 7) & ~7;
vyres = var->yres_virtual;
- xoffset = var->xoffset;
+ xoffset = (var->xoffset + 7) & ~7;
yoffset = var->yoffset;
bpp = var->bits_per_pixel;
if (bpp == 16)
@@ -827,7 +846,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
} else
FAIL("invalid bpp");
- if (vxres * vyres * bpp / 8 > info->fix.smem_len)
+ line_length = calc_line_length(par, vxres, bpp);
+
+ if (vyres * line_length > info->fix.smem_len)
FAIL("not enough video RAM");
h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
@@ -969,7 +990,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
crtc->xoffset = xoffset;
crtc->yoffset = yoffset;
crtc->bpp = bpp;
- crtc->off_pitch = ((yoffset*vxres+xoffset)*bpp/64) | (vxres<<19);
+ crtc->off_pitch =
+ ((yoffset * line_length + xoffset * bpp / 8) / 8) |
+ ((line_length / bpp) << 22);
crtc->vline_crnt_vline = 0;
crtc->h_tot_disp = h_total | (h_disp<<16);
@@ -1394,7 +1417,9 @@ static int atyfb_set_par(struct fb_info *info)
}
aty_st_8(DAC_MASK, 0xff, par);
- info->fix.line_length = var->xres_virtual * var->bits_per_pixel/8;
+ info->fix.line_length = calc_line_length(par, var->xres_virtual,
+ var->bits_per_pixel);
+
info->fix.visual = var->bits_per_pixel <= 8 ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
@@ -1505,10 +1530,12 @@ static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info)
{
u32 xoffset = info->var.xoffset;
u32 yoffset = info->var.yoffset;
- u32 vxres = par->crtc.vxres;
+ u32 line_length = info->fix.line_length;
u32 bpp = info->var.bits_per_pixel;
- par->crtc.off_pitch = ((yoffset * vxres + xoffset) * bpp / 64) | (vxres << 19);
+ par->crtc.off_pitch =
+ ((yoffset * line_length + xoffset * bpp / 8) / 8) |
+ ((line_length / bpp) << 22);
}
@@ -2201,7 +2228,7 @@ static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
const int *refresh_tbl;
int i, size;
- if (IS_XL(par->pci_id) || IS_MOBILITY(par->pci_id)) {
+ if (M64_HAS(XL_MEM)) {
refresh_tbl = ragexl_tbl;
size = ARRAY_SIZE(ragexl_tbl);
} else {
@@ -2335,7 +2362,10 @@ static int __devinit aty_init(struct fb_info *info)
par->pll_ops = &aty_pll_ct;
par->bus_type = PCI;
par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07);
- ramname = aty_ct_ram[par->ram_type];
+ if (M64_HAS(XL_MEM))
+ ramname = aty_xl_ram[par->ram_type];
+ else
+ ramname = aty_ct_ram[par->ram_type];
/* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM)
par->pll_limits.mclk = 63;
@@ -2390,9 +2420,9 @@ static int __devinit aty_init(struct fb_info *info)
#endif /* CONFIG_FB_ATY_CT */
/* save previous video mode */
- aty_get_crtc(par, &saved_crtc);
+ aty_get_crtc(par, &par->saved_crtc);
if(par->pll_ops->get_pll)
- par->pll_ops->get_pll(info, &saved_pll);
+ par->pll_ops->get_pll(info, &par->saved_pll);
par->mem_cntl = aty_ld_le32(MEM_CNTL, par);
gtb_memsize = M64_HAS(GTB_DSP);
@@ -2667,8 +2697,8 @@ static int __devinit aty_init(struct fb_info *info)
aty_init_exit:
/* restore video mode */
- aty_set_crtc(par, &saved_crtc);
- par->pll_ops->set_pll(info, &saved_pll);
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(info, &par->saved_pll);
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
@@ -3502,6 +3532,11 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
par->mmap_map[1].prot_flag = _PAGE_E;
#endif /* __sparc__ */
+ mutex_lock(&reboot_lock);
+ if (!reboot_info)
+ reboot_info = info;
+ mutex_unlock(&reboot_lock);
+
return 0;
err_release_io:
@@ -3614,8 +3649,8 @@ static void __devexit atyfb_remove(struct fb_info *info)
struct atyfb_par *par = (struct atyfb_par *) info->par;
/* restore video mode */
- aty_set_crtc(par, &saved_crtc);
- par->pll_ops->set_pll(info, &saved_pll);
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(info, &par->saved_pll);
unregister_framebuffer(info);
@@ -3661,6 +3696,11 @@ static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
+ mutex_lock(&reboot_lock);
+ if (reboot_info == info)
+ reboot_info = NULL;
+ mutex_unlock(&reboot_lock);
+
atyfb_remove(info);
}
@@ -3808,6 +3848,56 @@ static int __init atyfb_setup(char *options)
}
#endif /* MODULE */
+static int atyfb_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct atyfb_par *par;
+
+ if (code != SYS_RESTART)
+ return NOTIFY_DONE;
+
+ mutex_lock(&reboot_lock);
+
+ if (!reboot_info)
+ goto out;
+
+ if (!lock_fb_info(reboot_info))
+ goto out;
+
+ par = reboot_info->par;
+
+ /*
+ * HP OmniBook 500's BIOS doesn't like the state of the
+ * hardware after atyfb has been used. Restore the hardware
+ * to the original state to allow successful reboots.
+ */
+ aty_set_crtc(par, &par->saved_crtc);
+ par->pll_ops->set_pll(reboot_info, &par->saved_pll);
+
+ unlock_fb_info(reboot_info);
+ out:
+ mutex_unlock(&reboot_lock);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block atyfb_reboot_notifier = {
+ .notifier_call = atyfb_reboot_notify,
+};
+
+static const struct dmi_system_id atyfb_reboot_ids[] = {
+ {
+ .ident = "HP OmniBook 500",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP OmniBook PC"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP OmniBook 500 FA"),
+ },
+ },
+
+ { }
+};
+
static int __init atyfb_init(void)
{
int err1 = 1, err2 = 1;
@@ -3826,11 +3916,20 @@ static int __init atyfb_init(void)
err2 = atyfb_atari_probe();
#endif
- return (err1 && err2) ? -ENODEV : 0;
+ if (err1 && err2)
+ return -ENODEV;
+
+ if (dmi_check_system(atyfb_reboot_ids))
+ register_reboot_notifier(&atyfb_reboot_notifier);
+
+ return 0;
}
static void __exit atyfb_exit(void)
{
+ if (dmi_check_system(atyfb_reboot_ids))
+ unregister_reboot_notifier(&atyfb_reboot_notifier);
+
#ifdef CONFIG_PCI
pci_unregister_driver(&atyfb_driver);
#endif
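
A worked example of the rounding calc_line_length() introduces above, with illustrative numbers: SGRAM (and WRAM on parts without M64F_XL_MEM) needs the byte pitch rounded up to a multiple of 64, and that pitch now feeds both fix.line_length and the off_pitch programming.

#include <linux/types.h>

/* Mirrors calc_line_length(): 800 pixels at 24 bpp give 2400 bytes per
 * line; the SGRAM/WRAM case rounds that up to the next multiple of 64,
 * i.e. 2432.
 */
static u32 example_line_length(u32 vxres, u32 bpp, bool needs_align)
{
	u32 line_length = vxres * bpp / 8;		/* 800 * 24 / 8 = 2400 */

	if (needs_align)
		line_length = (line_length + 63) & ~63;	/* 2400 -> 2432 */

	return line_length;
}
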
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
index 0cc9724e61a2..51fcc0a2c94a 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/aty/mach64_accel.c
@@ -63,14 +63,17 @@ static void reset_GTC_3D_engine(const struct atyfb_par *par)
void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
{
u32 pitch_value;
+ u32 vxres;
/* determine modal information from global mode structure */
- pitch_value = info->var.xres_virtual;
+ pitch_value = info->fix.line_length / (info->var.bits_per_pixel / 8);
+ vxres = info->var.xres_virtual;
if (info->var.bits_per_pixel == 24) {
/* In 24 bpp, the engine is in 8 bpp - this requires that all */
/* horizontal coordinates and widths must be adjusted */
pitch_value *= 3;
+ vxres *= 3;
}
/* On GTC (RagePro), we need to reset the 3D engine before */
@@ -133,7 +136,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
aty_st_le32(SC_LEFT, 0, par);
aty_st_le32(SC_TOP, 0, par);
aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par);
- aty_st_le32(SC_RIGHT, pitch_value - 1, par);
+ aty_st_le32(SC_RIGHT, vxres - 1, par);
/* set background color to minimum value (usually BLACK) */
aty_st_le32(DP_BKGD_CLR, 0, par);
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index f8a09bf8d0cd..53ea05645ff8 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1310,8 +1310,6 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
static int
fb_mmap(struct file *file, struct vm_area_struct * vma)
-__acquires(&info->lock)
-__releases(&info->lock)
{
int fbidx = iminor(file->f_path.dentry->d_inode);
struct fb_info *info = registered_fb[fbidx];
@@ -1325,16 +1323,14 @@ __releases(&info->lock)
off = vma->vm_pgoff << PAGE_SHIFT;
if (!fb)
return -ENODEV;
+ mutex_lock(&info->mm_lock);
if (fb->fb_mmap) {
int res;
- mutex_lock(&info->lock);
res = fb->fb_mmap(info, vma);
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
return res;
}
- mutex_lock(&info->lock);
-
/* frame buffer memory */
start = info->fix.smem_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
@@ -1342,13 +1338,13 @@ __releases(&info->lock)
/* memory mapped io */
off -= len;
if (info->var.accel_flags) {
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
return -EINVAL;
}
start = info->fix.mmio_start;
len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
}
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->mm_lock);
start &= PAGE_MASK;
if ((vma->vm_end - vma->vm_start + off) > len)
return -EINVAL;
@@ -1518,6 +1514,7 @@ register_framebuffer(struct fb_info *fb_info)
break;
fb_info->node = i;
mutex_init(&fb_info->lock);
+ mutex_init(&fb_info->mm_lock);
fb_info->dev = device_create(fb_class, fb_info->device,
MKDEV(FB_MAJOR, i), NULL, "fb%d", i);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index f153c581cbd7..0bf2190928d0 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -750,24 +750,26 @@ static void update_lcdc(struct fb_info *info)
static int map_video_memory(struct fb_info *info)
{
phys_addr_t phys;
+ u32 smem_len = info->fix.line_length * info->var.yres_virtual;
pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual);
pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
+ pr_debug("MAP_VIDEO_MEMORY: smem_len = %u\n", smem_len);
- info->fix.smem_len = info->fix.line_length * info->var.yres_virtual;
- pr_debug("MAP_VIDEO_MEMORY: smem_len = %d\n", info->fix.smem_len);
- info->screen_base = fsl_diu_alloc(info->fix.smem_len, &phys);
+ info->screen_base = fsl_diu_alloc(smem_len, &phys);
if (info->screen_base == NULL) {
printk(KERN_ERR "Unable to allocate fb memory\n");
return -ENOMEM;
}
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = (unsigned long) phys;
+ info->fix.smem_len = smem_len;
+ mutex_unlock(&info->mm_lock);
info->screen_size = info->fix.smem_len;
pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n",
- info->fix.smem_start,
- info->fix.smem_len);
+ info->fix.smem_start, info->fix.smem_len);
pr_debug("screen base %p\n", info->screen_base);
return 0;
@@ -776,9 +778,11 @@ static int map_video_memory(struct fb_info *info)
static void unmap_video_memory(struct fb_info *info)
{
fsl_diu_free(info->screen_base, info->fix.smem_len);
+ mutex_lock(&info->mm_lock);
info->screen_base = NULL;
info->fix.smem_start = 0;
info->fix.smem_len = 0;
+ mutex_unlock(&info->mm_lock);
}
/*
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 2e940199fc89..71960672d721 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1090,8 +1090,10 @@ static int encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
strcpy(fix->id, "I810");
+ mutex_lock(&info->mm_lock);
fix->smem_start = par->fb.physical;
fix->smem_len = par->fb.size;
+ mutex_unlock(&info->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
fix->xpanstep = 8;
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 8e7a275df50c..59c3a2e14913 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -724,8 +724,10 @@ static void matroxfb_update_fix(WPMINFO2)
struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
DBG(__func__)
+ mutex_lock(&ACCESS_FBINFO(fbcon).mm_lock);
fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes);
fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes);
+ mutex_unlock(&ACCESS_FBINFO(fbcon).mm_lock);
}
static int matroxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -2081,6 +2083,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
spin_lock_init(&ACCESS_FBINFO(lock.accel));
init_rwsem(&ACCESS_FBINFO(crtc2.lock));
init_rwsem(&ACCESS_FBINFO(altout.lock));
+ mutex_init(&ACCESS_FBINFO(fbcon).mm_lock);
ACCESS_FBINFO(irq_flags) = 0;
init_waitqueue_head(&ACCESS_FBINFO(crtc1.vsync.wait));
init_waitqueue_head(&ACCESS_FBINFO(crtc2.vsync.wait));
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 7ac4c5f6145d..909e10a11898 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -289,13 +289,16 @@ static int matroxfb_dh_release(struct fb_info* info, int user) {
#undef m2info
}
-static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) {
+static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info)
+{
struct fb_fix_screeninfo *fix = &m2info->fbcon.fix;
strcpy(fix->id, "MATROX DH");
+ mutex_lock(&m2info->fbcon.mm_lock);
fix->smem_start = m2info->video.base;
fix->smem_len = m2info->video.len_usable;
+ mutex_unlock(&m2info->fbcon.mm_lock);
fix->ypanstep = 1;
fix->ywrapstep = 0;
fix->xpanstep = 8; /* TBD */
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index b7af5256e887..567fb944bd2a 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -669,7 +669,7 @@ static uint32_t bpp_to_pixfmt(int bpp)
}
static int mx3fb_blank(int blank, struct fb_info *fbi);
-static int mx3fb_map_video_memory(struct fb_info *fbi);
+static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len);
static int mx3fb_unmap_video_memory(struct fb_info *fbi);
/**
@@ -742,8 +742,7 @@ static int mx3fb_set_par(struct fb_info *fbi)
if (fbi->fix.smem_start)
mx3fb_unmap_video_memory(fbi);
- fbi->fix.smem_len = mem_len;
- if (mx3fb_map_video_memory(fbi) < 0) {
+ if (mx3fb_map_video_memory(fbi, mem_len) < 0) {
mutex_unlock(&mx3_fbi->mutex);
return -ENOMEM;
}
@@ -1198,6 +1197,7 @@ static int mx3fb_resume(struct platform_device *pdev)
/**
* mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer.
* @fbi: framebuffer information pointer
+ * @mem_len: length of mapped memory
* @return: Error code indicating success or failure
*
* This buffer is remapped into a non-cached, non-buffered, memory region to
@@ -1205,23 +1205,26 @@ static int mx3fb_resume(struct platform_device *pdev)
* area is remapped, all virtual memory access to the video memory should occur
* at the new region.
*/
-static int mx3fb_map_video_memory(struct fb_info *fbi)
+static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len)
{
int retval = 0;
dma_addr_t addr;
fbi->screen_base = dma_alloc_writecombine(fbi->device,
- fbi->fix.smem_len,
+ mem_len,
&addr, GFP_DMA);
if (!fbi->screen_base) {
dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
- fbi->fix.smem_len);
+ mem_len);
retval = -EBUSY;
goto err0;
}
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = addr;
+ fbi->fix.smem_len = mem_len;
+ mutex_unlock(&fbi->mm_lock);
dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n",
(uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len);
@@ -1251,8 +1254,10 @@ static int mx3fb_unmap_video_memory(struct fb_info *fbi)
fbi->screen_base, fbi->fix.smem_start);
fbi->screen_base = 0;
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
+ mutex_unlock(&fbi->mm_lock);
return 0;
}
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 060d72fe57cb..4ea99bfc37b4 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -393,8 +393,10 @@ static void set_fb_fix(struct fb_info *fbi)
rg = &plane->fbdev->mem_desc.region[plane->idx];
fbi->screen_base = rg->vaddr;
+ mutex_lock(&fbi->mm_lock);
fix->smem_start = rg->paddr;
fix->smem_len = rg->size;
+ mutex_unlock(&fbi->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
bpp = var->bits_per_pixel;
@@ -886,8 +888,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
* plane memory is deallocated, the other
* screen parameters in var / fix are invalid.
*/
+ mutex_lock(&fbi->mm_lock);
fbi->fix.smem_start = 0;
fbi->fix.smem_len = 0;
+ mutex_unlock(&fbi->mm_lock);
}
}
}
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 03b3670130a0..bacfabd9ce16 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -141,7 +141,9 @@ static int platinumfb_set_par (struct fb_info *info)
offset = 0x10;
info->screen_base = pinfo->frame_buffer + init->fb_offset + offset;
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset;
+ mutex_unlock(&info->mm_lock);
info->fix.visual = (pinfo->cmode == CMODE_8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode)
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 0889d50c3288..6506117c134b 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -815,8 +815,10 @@ static int overlayfb_map_video_memory(struct pxafb_layer *ofb)
ofb->video_mem_phys = virt_to_phys(ofb->video_mem);
ofb->video_mem_size = size;
+ mutex_lock(&ofb->fb.mm_lock);
ofb->fb.fix.smem_start = ofb->video_mem_phys;
ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual;
+ mutex_unlock(&ofb->fb.mm_lock);
ofb->fb.screen_base = ofb->video_mem;
return 0;
}
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 653bdfee3057..9f6d6e61f0cc 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -120,18 +120,6 @@ static int sh7760_setcolreg (u_int regno,
return 0;
}
-static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info,
- unsigned long stride)
-{
- memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strcpy(fix->id, "sh7760-lcdc");
-
- fix->smem_start = (unsigned long)info->screen_base;
- fix->smem_len = info->screen_size;
-
- fix->line_length = stride;
-}
-
static int sh7760fb_get_color_info(struct device *dev,
u16 lddfr, int *bpp, int *gray)
{
@@ -334,7 +322,8 @@ static int sh7760fb_set_par(struct fb_info *info)
iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */
- encode_fix(&info->fix, info, stride);
+ info->fix.line_length = stride;
+
sh7760fb_check_var(&info->var, info);
sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */
@@ -435,6 +424,8 @@ static int sh7760fb_alloc_mem(struct fb_info *info)
info->screen_base = fbmem;
info->screen_size = vram;
+ info->fix.smem_start = (unsigned long)info->screen_base;
+ info->fix.smem_len = info->screen_size;
return 0;
}
@@ -520,6 +511,8 @@ static int __devinit sh7760fb_probe(struct platform_device *pdev)
info->var.transp.length = 0;
info->var.transp.msb_right = 0;
+ strcpy(info->fix.id, "sh7760-lcdc");
+
/* set the DON2 bit now, before cmap allocation, as it will randomize
* palette memory.
*/
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 7072d19080d5..fd33455389b8 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1847,8 +1847,10 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
strcpy(fix->id, ivideo->myid);
+ mutex_lock(&info->mm_lock);
fix->smem_start = ivideo->video_base + ivideo->video_offset;
fix->smem_len = ivideo->sisfb_mem;
+ mutex_unlock(&info->mm_lock);
fix->type = FB_TYPE_PACKED_PIXELS;
fix->type_aux = 0;
fix->visual = (ivideo->video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index eb5d73a06702..98f24f0ec00d 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -145,7 +145,7 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
#define SM501_MEMF_ACCEL (8)
static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
- unsigned int why, size_t size)
+ unsigned int why, size_t size, u32 smem_len)
{
struct sm501fb_par *par;
struct fb_info *fbi;
@@ -172,7 +172,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
if (ptr > 0)
ptr &= ~(PAGE_SIZE - 1);
- if (fbi && ptr < fbi->fix.smem_len)
+ if (fbi && ptr < smem_len)
return -ENOMEM;
break;
@@ -197,7 +197,7 @@ static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
case SM501_MEMF_ACCEL:
fbi = inf->fb[HEAD_CRT];
- ptr = fbi ? fbi->fix.smem_len : 0;
+ ptr = fbi ? smem_len : 0;
fbi = inf->fb[HEAD_PANEL];
if (fbi) {
@@ -413,6 +413,7 @@ static int sm501fb_set_par_common(struct fb_info *info,
unsigned int mem_type;
unsigned int clock_type;
unsigned int head_addr;
+ unsigned int smem_len;
dev_dbg(fbi->dev, "%s: %dx%d, bpp = %d, virtual %dx%d\n",
__func__, var->xres, var->yres, var->bits_per_pixel,
@@ -453,18 +454,20 @@ static int sm501fb_set_par_common(struct fb_info *info,
/* allocate fb memory within 501 */
info->fix.line_length = (var->xres_virtual * var->bits_per_pixel)/8;
- info->fix.smem_len = info->fix.line_length * var->yres_virtual;
+ smem_len = info->fix.line_length * var->yres_virtual;
dev_dbg(fbi->dev, "%s: line length = %u\n", __func__,
info->fix.line_length);
- if (sm501_alloc_mem(fbi, &par->screen, mem_type,
- info->fix.smem_len)) {
+ if (sm501_alloc_mem(fbi, &par->screen, mem_type, smem_len, smem_len)) {
dev_err(fbi->dev, "no memory available\n");
return -ENOMEM;
}
+ mutex_lock(&info->mm_lock);
info->fix.smem_start = fbi->fbmem_res->start + par->screen.sm_addr;
+ info->fix.smem_len = smem_len;
+ mutex_unlock(&info->mm_lock);
info->screen_base = fbi->fbmem + par->screen.sm_addr;
info->screen_size = info->fix.smem_len;
@@ -637,7 +640,8 @@ static int sm501fb_set_par_crt(struct fb_info *info)
if ((control & SM501_DC_CRT_CONTROL_SEL) == 0) {
/* the head is displaying panel data... */
- sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0);
+ sm501_alloc_mem(fbi, &par->screen, SM501_MEMF_CRT, 0,
+ info->fix.smem_len);
goto out_update;
}
@@ -1289,7 +1293,8 @@ static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
par->cursor_regs = info->regs + reg_base;
- ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024);
+ ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024,
+ fbi->fix.smem_len);
if (ret < 0)
return ret;
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index d0674f1e3f10..8a141c2c637b 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -523,6 +523,7 @@ static int w100fb_set_par(struct fb_info *info)
info->fix.ywrapstep = 0;
info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;
+ mutex_lock(&info->mm_lock);
if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
par->extmem_active = 1;
info->fix.smem_len = par->mach->mem->size+1;
@@ -530,6 +531,7 @@ static int w100fb_set_par(struct fb_info *info)
par->extmem_active = 0;
info->fix.smem_len = MEM_INT_SIZE+1;
}
+ mutex_unlock(&info->mm_lock);
w100fb_activate_var(par);
}
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 210acafe4a9b..3ff8bdd18fb3 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -432,7 +432,6 @@ vfs_rejected_lock:
list_del_init(&fl->fl_u.afs.link);
if (list_empty(&vnode->granted_locks))
afs_defer_unlock(vnode, key);
- spin_unlock(&vnode->lock);
goto abort_attempt;
}
diff --git a/fs/aio.c b/fs/aio.c
index 76da12537956..d065b2c3273e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -485,6 +485,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
assert_spin_locked(&ctx->ctx_lock);
+ if (req->ki_eventfd != NULL)
+ eventfd_ctx_put(req->ki_eventfd);
if (req->ki_dtor)
req->ki_dtor(req);
if (req->ki_iovec != &req->ki_inline_vec)
@@ -509,8 +511,6 @@ static void aio_fput_routine(struct work_struct *data)
/* Complete the fput(s) */
if (req->ki_filp != NULL)
__fput(req->ki_filp);
- if (req->ki_eventfd != NULL)
- __fput(req->ki_eventfd);
/* Link the iocb into the context's free list */
spin_lock_irq(&ctx->ctx_lock);
@@ -528,8 +528,6 @@ static void aio_fput_routine(struct work_struct *data)
*/
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
- int schedule_putreq = 0;
-
dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
req, atomic_long_read(&req->ki_filp->f_count));
@@ -549,24 +547,16 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
* we would not be holding the last reference to the file*, so
* this function will be executed w/out any aio kthread wakeup.
*/
- if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count)))
- schedule_putreq++;
- else
- req->ki_filp = NULL;
- if (req->ki_eventfd != NULL) {
- if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
- schedule_putreq++;
- else
- req->ki_eventfd = NULL;
- }
- if (unlikely(schedule_putreq)) {
+ if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
get_ioctx(ctx);
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
spin_unlock(&fput_lock);
queue_work(aio_wq, &fput_work);
- } else
+ } else {
+ req->ki_filp = NULL;
really_put_req(ctx, req);
+ }
return 1;
}
@@ -1622,7 +1612,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
* an eventfd() fd, and will be signaled for each completed
* event using the eventfd_signal() function.
*/
- req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
+ req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
if (IS_ERR(req->ki_eventfd)) {
ret = PTR_ERR(req->ki_eventfd);
req->ki_eventfd = NULL;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 9fa212b014a5..f1867900e459 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1929,7 +1929,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
elf = kmalloc(sizeof(*elf), GFP_KERNEL);
if (!elf)
goto out;
-
+ /*
+ * The number of segs is recorded in the ELF header as a 16-bit value.
+ * Please check the DEFAULT_MAX_MAP_COUNT definition when modifying this.
+ */
segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
segs += ELF_CORE_EXTRA_PHDRS;
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 3f0e1974abdc..31d12de83a2a 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -14,35 +14,44 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
-#include <linux/eventfd.h>
#include <linux/syscalls.h>
#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/eventfd.h>
struct eventfd_ctx {
+ struct kref kref;
wait_queue_head_t wqh;
/*
* Every time that a write(2) is performed on an eventfd, the
* value of the __u64 being written is added to "count" and a
* wakeup is performed on "wqh". A read(2) will return the "count"
* value to userspace, and will reset "count" to zero. The kernel
- * size eventfd_signal() also, adds to the "count" counter and
+ * side eventfd_signal() also adds to the "count" counter and
* issue a wakeup.
*/
__u64 count;
unsigned int flags;
};
-/*
- * Adds "n" to the eventfd counter "count". Returns "n" in case of
- * success, or a value lower then "n" in case of coutner overflow.
- * This function is supposed to be called by the kernel in paths
- * that do not allow sleeping. In this function we allow the counter
- * to reach the ULLONG_MAX value, and we signal this as overflow
- * condition by returining a POLLERR to poll(2).
+/**
+ * eventfd_signal - Adds @n to the eventfd counter.
+ * @ctx: [in] Pointer to the eventfd context.
+ * @n: [in] Value of the counter to be added to the eventfd internal counter.
+ * The value cannot be negative.
+ *
+ * This function is supposed to be called by the kernel in paths that do not
+ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+ * value, and we signal this as an overflow condition by returning a POLLERR
+ * to poll(2).
+ *
+ * Returns @n in case of success, a non-negative number lower than @n in case
+ * of overflow, or the following error codes:
+ *
+ * -EINVAL : The value of @n is negative.
*/
-int eventfd_signal(struct file *file, int n)
+int eventfd_signal(struct eventfd_ctx *ctx, int n)
{
- struct eventfd_ctx *ctx = file->private_data;
unsigned long flags;
if (n < 0)
@@ -59,9 +68,45 @@ int eventfd_signal(struct file *file, int n)
}
EXPORT_SYMBOL_GPL(eventfd_signal);
+static void eventfd_free(struct kref *kref)
+{
+ struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);
+
+ kfree(ctx);
+}
+
+/**
+ * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
+ * @ctx: [in] Pointer to the eventfd context.
+ *
+ * Returns: a pointer to the eventfd context.
+ */
+struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
+{
+ kref_get(&ctx->kref);
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_get);
+
+/**
+ * eventfd_ctx_put - Releases a reference to the internal eventfd context.
+ * @ctx: [in] Pointer to eventfd context.
+ *
+ * The eventfd context reference must have been previously acquired either
+ * with eventfd_ctx_get() or eventfd_ctx_fdget().
+ */
+void eventfd_ctx_put(struct eventfd_ctx *ctx)
+{
+ kref_put(&ctx->kref, eventfd_free);
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_put);
+
static int eventfd_release(struct inode *inode, struct file *file)
{
- kfree(file->private_data);
+ struct eventfd_ctx *ctx = file->private_data;
+
+ wake_up_poll(&ctx->wqh, POLLHUP);
+ eventfd_ctx_put(ctx);
return 0;
}
@@ -185,6 +230,16 @@ static const struct file_operations eventfd_fops = {
.write = eventfd_write,
};
+/**
+ * eventfd_fget - Acquire a reference of an eventfd file descriptor.
+ * @fd: [in] Eventfd file descriptor.
+ *
+ * Returns a pointer to the eventfd file structure in case of success, or the
+ * following error pointer:
+ *
+ * -EBADF : Invalid @fd file descriptor.
+ * -EINVAL : The @fd file descriptor is not an eventfd file.
+ */
struct file *eventfd_fget(int fd)
{
struct file *file;
@@ -201,6 +256,48 @@ struct file *eventfd_fget(int fd)
}
EXPORT_SYMBOL_GPL(eventfd_fget);
+/**
+ * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
+ * @fd: [in] Eventfd file descriptor.
+ *
+ * Returns a pointer to the internal eventfd context, otherwise the error
+ * pointers returned by the following functions:
+ *
+ * eventfd_fget
+ */
+struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+{
+ struct file *file;
+ struct eventfd_ctx *ctx;
+
+ file = eventfd_fget(fd);
+ if (IS_ERR(file))
+ return (struct eventfd_ctx *) file;
+ ctx = eventfd_ctx_get(file->private_data);
+ fput(file);
+
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
+
+/**
+ * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
+ * @file: [in] Eventfd file pointer.
+ *
+ * Returns a pointer to the internal eventfd context, otherwise the error
+ * pointer:
+ *
+ * -EINVAL : The @file pointer does not refer to an eventfd file.
+ */
+struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
+{
+ if (file->f_op != &eventfd_fops)
+ return ERR_PTR(-EINVAL);
+
+ return eventfd_ctx_get(file->private_data);
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
+
SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
int fd;
@@ -217,6 +314,7 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
if (!ctx)
return -ENOMEM;
+ kref_init(&ctx->kref);
init_waitqueue_head(&ctx->wqh);
ctx->count = count;
ctx->flags = flags;
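
A hedged sketch of how a kernel-side consumer uses the reference-counted context API added above, mirroring the fs/aio.c conversion earlier in this patch; the my_completion structure and function names are invented for illustration.

#include <linux/err.h>
#include <linux/eventfd.h>

struct my_completion {				/* illustrative container */
	struct eventfd_ctx *eventfd;
};

/* Resolve the user-supplied eventfd once; eventfd_ctx_fdget() takes its
 * own reference, so the file descriptor itself is not kept pinned.
 */
static int my_completion_setup(struct my_completion *c, int resfd)
{
	c->eventfd = eventfd_ctx_fdget(resfd);
	if (IS_ERR(c->eventfd)) {
		int err = PTR_ERR(c->eventfd);

		c->eventfd = NULL;
		return err;
	}
	return 0;
}

/* Signal one completion: adds 1 to the counter and wakes any poll()ers. */
static void my_completion_fire(struct my_completion *c)
{
	if (c->eventfd)
		eventfd_signal(c->eventfd, 1);
}

/* Drop our reference; the context is freed once the last kref is gone. */
static void my_completion_teardown(struct my_completion *c)
{
	if (c->eventfd)
		eventfd_ctx_put(c->eventfd);
}
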
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 6524ecaebb7a..e1dedb0f7873 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -66,8 +66,16 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str
inode = NULL;
if (ino) {
inode = ext2_iget(dir->i_sb, ino);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
+ if (unlikely(IS_ERR(inode))) {
+ if (PTR_ERR(inode) == -ESTALE) {
+ ext2_error(dir->i_sb, __func__,
+ "deleted inode referenced: %lu",
+ ino);
+ return ERR_PTR(-EIO);
+ } else {
+ return ERR_CAST(inode);
+ }
+ }
}
return d_splice_alias(inode, dentry);
}
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index fe02ad4740e7..032604e5ef2c 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -972,6 +972,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
sb->s_blocksize_bits = 10;
sb->s_magic = HOSTFS_SUPER_MAGIC;
sb->s_op = &hostfs_sbops;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
/* NULL is printed as <NULL> by sprintf: avoid that. */
if (req_root == NULL)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d7d50d7ee51e..aa00800adacc 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -97,4 +97,8 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_ATTRIBUTES
#endif
+#ifndef PER_CPU_DEF_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
+#endif
+
#endif /* _ASM_GENERIC_PERCPU_H_ */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 92b73b6140ff..dccdbed05848 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -441,7 +441,8 @@
}
#ifdef CONFIG_CONSTRUCTORS
-#define KERNEL_CTORS() VMLINUX_SYMBOL(__ctors_start) = .; \
+#define KERNEL_CTORS() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__ctors_start) = .; \
*(.ctors) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b16a957030f8..47f7d932a01d 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -121,9 +121,9 @@ struct kiocb {
/*
* If the aio_resfd field of the userspace iocb is not zero,
- * this is the underlying file* to deliver event to.
+ * this is the underlying eventfd context to deliver events to.
*/
- struct file *ki_eventfd;
+ struct eventfd_ctx *ki_eventfd;
};
#define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY)
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index f45a8ae5f828..3b85ba6479f4 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -8,10 +8,8 @@
#ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H
-#ifdef CONFIG_EVENTFD
-
-/* For O_CLOEXEC and O_NONBLOCK */
#include <linux/fcntl.h>
+#include <linux/file.h>
/*
* CAREFUL: Check include/asm-generic/fcntl.h when defining
@@ -27,16 +25,37 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+#ifdef CONFIG_EVENTFD
+
+struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
+void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
-int eventfd_signal(struct file *file, int n);
+struct eventfd_ctx *eventfd_ctx_fdget(int fd);
+struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
+int eventfd_signal(struct eventfd_ctx *ctx, int n);
#else /* CONFIG_EVENTFD */
-#define eventfd_fget(fd) ERR_PTR(-ENOSYS)
-static inline int eventfd_signal(struct file *file, int n)
-{ return 0; }
+/*
+ * Ugly ugly ugly error layer to support modules that use eventfd but
+ * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
+ */
+static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
+{
+ return -ENOSYS;
+}
+
+static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
+{
+
+}
-#endif /* CONFIG_EVENTFD */
+#endif
#endif /* _LINUX_EVENTFD_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index dd68358996b7..f847df9e99b6 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -819,6 +819,7 @@ struct fb_info {
int node;
int flags;
struct mutex lock; /* Lock for open/release/ioctl funcs */
+ struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
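
A short sketch, with invented mydrv_* names, of the rule the new mm_lock establishes: drivers update smem_start/smem_len only while holding info->mm_lock, so fb_mmap() (which now takes the same mutex) never maps a half-updated range.

#include <linux/fb.h>

static void mydrv_update_mem(struct fb_info *info, unsigned long phys, u32 len)
{
	mutex_lock(&info->mm_lock);
	info->fix.smem_start = phys;	/* both fields change atomically */
	info->fix.smem_len   = len;	/* with respect to fb_mmap()     */
	mutex_unlock(&info->mm_lock);
}
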
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
index 3430c7751948..7ae05338e94c 100644
--- a/include/linux/netfilter/xt_conntrack.h
+++ b/include/linux/netfilter/xt_conntrack.h
@@ -81,4 +81,17 @@ struct xt_conntrack_mtinfo1 {
__u8 state_mask, status_mask;
};
+struct xt_conntrack_mtinfo2 {
+ union nf_inet_addr origsrc_addr, origsrc_mask;
+ union nf_inet_addr origdst_addr, origdst_mask;
+ union nf_inet_addr replsrc_addr, replsrc_mask;
+ union nf_inet_addr repldst_addr, repldst_mask;
+ __u32 expires_min, expires_max;
+ __u16 l4proto;
+ __be16 origsrc_port, origdst_port;
+ __be16 replsrc_port, repldst_port;
+ __u16 match_flags, invert_flags;
+ __u16 state_mask, status_mask;
+};
+
#endif /*_XT_CONNTRACK_H*/
diff --git a/include/linux/netfilter/xt_osf.h b/include/linux/netfilter/xt_osf.h
index fd2272e0959a..18afa495f973 100644
--- a/include/linux/netfilter/xt_osf.h
+++ b/include/linux/netfilter/xt_osf.h
@@ -20,6 +20,8 @@
#ifndef _XT_OSF_H
#define _XT_OSF_H
+#include <linux/types.h>
+
#define MAXGENRELEN 32
#define XT_OSF_GENRE (1<<0)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a3b000365795..73b46b6b904f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2645,6 +2645,7 @@
#define PCI_DEVICE_ID_NETMOS_9835 0x9835
#define PCI_DEVICE_ID_NETMOS_9845 0x9845
#define PCI_DEVICE_ID_NETMOS_9855 0x9855
+#define PCI_DEVICE_ID_NETMOS_9901 0x9901
#define PCI_VENDOR_ID_3COM_2 0xa727
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 8f921d74f49f..68438e18fff4 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -24,7 +24,8 @@
#define DEFINE_PER_CPU_SECTION(type, name, section) \
__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+ PER_CPU_ATTRIBUTES PER_CPU_DEF_ATTRIBUTES \
+ __typeof__(type) per_cpu__##name
/*
* Variant on the per-CPU variable declaration/definition theme used for
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 89698d8aba5c..5e970c7d3fd5 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -178,8 +178,10 @@ struct perf_counter_attr {
mmap : 1, /* include mmap data */
comm : 1, /* include comm data */
freq : 1, /* use freq, not period */
+ inherit_stat : 1, /* per task counts */
+ enable_on_exec : 1, /* next exec enables */
- __reserved_1 : 53;
+ __reserved_1 : 51;
__u32 wakeup_events; /* wakeup every n events */
__u32 __reserved_2;
@@ -232,6 +234,14 @@ struct perf_counter_mmap_page {
__u32 lock; /* seqlock for synchronization */
__u32 index; /* hardware counter identifier */
__s64 offset; /* add to hardware counter value */
+ __u64 time_enabled; /* time counter active */
+ __u64 time_running; /* time counter on cpu */
+
+ /*
+ * Hole for extension of the self monitor capabilities
+ */
+
+ __u64 __reserved[123]; /* align to 1k */
/*
* Control data for the mmap() data buffer.
@@ -253,7 +263,6 @@ struct perf_counter_mmap_page {
#define PERF_EVENT_MISC_KERNEL (1 << 0)
#define PERF_EVENT_MISC_USER (2 << 0)
#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
-#define PERF_EVENT_MISC_OVERFLOW (1 << 2)
struct perf_event_header {
__u32 type;
@@ -327,9 +336,18 @@ enum perf_event_type {
PERF_EVENT_FORK = 7,
/*
- * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
- * will be PERF_SAMPLE_*
- *
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, tid;
+ * u64 value;
+ * { u64 time_enabled; } && PERF_FORMAT_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_RUNNING
+ * { u64 parent_id; } && PERF_FORMAT_ID
+ * };
+ */
+ PERF_EVENT_READ = 8,
+
+ /*
* struct {
* struct perf_event_header header;
*
@@ -337,8 +355,9 @@ enum perf_event_type {
* { u32 pid, tid; } && PERF_SAMPLE_TID
* { u64 time; } && PERF_SAMPLE_TIME
* { u64 addr; } && PERF_SAMPLE_ADDR
- * { u64 config; } && PERF_SAMPLE_CONFIG
+ * { u64 id; } && PERF_SAMPLE_ID
* { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 period; } && PERF_SAMPLE_PERIOD
*
* { u64 nr;
* { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
@@ -347,6 +366,9 @@ enum perf_event_type {
* u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
* };
*/
+ PERF_EVENT_SAMPLE = 9,
+
+ PERF_EVENT_MAX, /* non-ABI */
};
enum perf_callchain_context {
@@ -582,6 +604,7 @@ struct perf_counter_context {
int nr_counters;
int nr_active;
int is_active;
+ int nr_stat;
atomic_t refcount;
struct task_struct *task;
@@ -669,7 +692,16 @@ static inline int is_software_counter(struct perf_counter *counter)
(counter->attr.type != PERF_TYPE_HW_CACHE);
}
-extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+
+extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+
+static inline void
+perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+ if (atomic_read(&perf_swcounter_enabled[event]))
+ __perf_swcounter_event(event, nr, nmi, regs, addr);
+}
extern void __perf_counter_mmap(struct vm_area_struct *vma);
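
An illustrative call site for the gated wrapper above (the fault-accounting context is assumed, not taken from this patch): when no software counter of the given type is enabled, the inline reduces to a single atomic read and __perf_swcounter_event() is never called.

#include <linux/perf_counter.h>
#include <asm/ptrace.h>

static inline void example_account_fault(struct pt_regs *regs,
					 unsigned long address)
{
	/* cheap when perf_swcounter_enabled[PERF_COUNT_SW_PAGE_FAULTS] is 0 */
	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
}
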
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4d0754269884..0085d758d645 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -349,8 +349,20 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
struct nsproxy;
struct user_namespace;
-/* Maximum number of active map areas.. This is a random (large) number */
-#define DEFAULT_MAX_MAP_COUNT 65536
+/*
+ * Default maximum number of active map areas; this limits the number of vmas
+ * per mm struct. Users can override this number via sysctl, but there is a
+ * problem.
+ *
+ * When a program's coredump is generated in ELF format, one section is created
+ * per vma. In ELF, the number of sections is stored as an unsigned short,
+ * so it must stay smaller than 65535 at coredump time. Because the kernel adds
+ * some informative sections to the program image when generating the coredump,
+ * we need some margin. The number of extra sections is currently 1-3,
+ * depending on the arch, so we use 5 as a safe margin here.
+ */
+#define MAPCOUNT_ELF_CORE_MARGIN (5)
+#define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
extern int sysctl_max_map_count;
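
For reference, the new default works out to USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN = 65535 - 5 = 65530 map areas, just under the old hard-coded 65536 and safely below the 16-bit section-count limit described above.
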
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 9c4cd27f4685..c47c4b4da97e 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -80,6 +80,8 @@ struct spi_device {
#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
#define SPI_3WIRE 0x10 /* SI/SO signals shared */
#define SPI_LOOP 0x20 /* loopback mode */
+#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */
+#define SPI_READY 0x80 /* slave pulls low to pause */
u8 bits_per_word;
int irq;
void *controller_state;
@@ -248,6 +250,10 @@ struct spi_master {
/* spi_device.mode flags understood by this controller driver */
u16 mode_bits;
+ /* other constraints relevant to this driver */
+ u16 flags;
+#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */
+
/* Setup mode and clock, etc (spi driver may call many times).
*
* IMPORTANT: this may be called when transfers to another
diff --git a/include/linux/spi/spidev.h b/include/linux/spi/spidev.h
index 95251ccd5a07..bf0570a84f7a 100644
--- a/include/linux/spi/spidev.h
+++ b/include/linux/spi/spidev.h
@@ -40,6 +40,8 @@
#define SPI_LSB_FIRST 0x08
#define SPI_3WIRE 0x10
#define SPI_LOOP 0x20
+#define SPI_NO_CS 0x40
+#define SPI_READY 0x80
/*---------------------------------------------------------------------------*/
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 5d44059f6d63..310e18a880ff 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -42,7 +42,6 @@ struct usbnet {
/* protocol/interface state */
struct net_device *net;
- struct net_device_stats stats;
int msg_enable;
unsigned long data [5];
u32 xid;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index a632689b61b4..cbdd6284996d 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -258,8 +258,8 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
/* Update TCP window tracking data when NAT mangles the packet */
extern void nf_conntrack_tcp_update(const struct sk_buff *skb,
unsigned int dataoff,
- struct nf_conn *ct,
- int dir);
+ struct nf_conn *ct, int dir,
+ s16 offset);
/* Fake conntrack entry for untracked connections */
extern struct nf_conn nf_conntrack_untracked;
diff --git a/kernel/acct.c b/kernel/acct.c
index 7afa31564162..9f3391090b3e 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -215,6 +215,7 @@ static void acct_file_reopen(struct bsd_acct_struct *acct, struct file *file,
static int acct_on(char *name)
{
struct file *file;
+ struct vfsmount *mnt;
int error;
struct pid_namespace *ns;
struct bsd_acct_struct *acct = NULL;
@@ -256,11 +257,12 @@ static int acct_on(char *name)
acct = NULL;
}
- mnt_pin(file->f_path.mnt);
+ mnt = file->f_path.mnt;
+ mnt_pin(mnt);
acct_file_reopen(ns->bacct, file, ns);
spin_unlock(&acct_lock);
- mntput(file->f_path.mnt); /* it's pinned, now give up active reference */
+ mntput(mnt); /* it's pinned, now give up active reference */
kfree(acct);
return 0;
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 1a933a221ea4..d55a50da2347 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -236,6 +236,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
list_add_rcu(&counter->event_entry, &ctx->event_list);
ctx->nr_counters++;
+ if (counter->attr.inherit_stat)
+ ctx->nr_stat++;
}
/*
@@ -250,6 +252,8 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
if (list_empty(&counter->list_entry))
return;
ctx->nr_counters--;
+ if (counter->attr.inherit_stat)
+ ctx->nr_stat--;
list_del_init(&counter->list_entry);
list_del_rcu(&counter->event_entry);
@@ -1006,6 +1010,81 @@ static int context_equiv(struct perf_counter_context *ctx1,
&& !ctx1->pin_count && !ctx2->pin_count;
}
+static void __perf_counter_read(void *counter);
+
+static void __perf_counter_sync_stat(struct perf_counter *counter,
+ struct perf_counter *next_counter)
+{
+ u64 value;
+
+ if (!counter->attr.inherit_stat)
+ return;
+
+ /*
+	 * Update the counter value: we cannot use perf_counter_read()
+	 * because we're in the middle of a context switch and have IRQs
+	 * disabled, which upsets smp_call_function_single(). However, we
+	 * know the counter must be on the current CPU, so we don't need
+	 * to use it.
+ */
+ switch (counter->state) {
+ case PERF_COUNTER_STATE_ACTIVE:
+ __perf_counter_read(counter);
+ break;
+
+ case PERF_COUNTER_STATE_INACTIVE:
+ update_counter_times(counter);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * In order to keep per-task stats reliable we need to flip the counter
+ * values when we flip the contexts.
+ */
+ value = atomic64_read(&next_counter->count);
+ value = atomic64_xchg(&counter->count, value);
+ atomic64_set(&next_counter->count, value);
+
+ swap(counter->total_time_enabled, next_counter->total_time_enabled);
+ swap(counter->total_time_running, next_counter->total_time_running);
+
+ /*
+ * Since we swizzled the values, update the user visible data too.
+ */
+ perf_counter_update_userpage(counter);
+ perf_counter_update_userpage(next_counter);
+}
+
+#define list_next_entry(pos, member) \
+ list_entry(pos->member.next, typeof(*pos), member)
+
+static void perf_counter_sync_stat(struct perf_counter_context *ctx,
+ struct perf_counter_context *next_ctx)
+{
+ struct perf_counter *counter, *next_counter;
+
+ if (!ctx->nr_stat)
+ return;
+
+ counter = list_first_entry(&ctx->event_list,
+ struct perf_counter, event_entry);
+
+ next_counter = list_first_entry(&next_ctx->event_list,
+ struct perf_counter, event_entry);
+
+ while (&counter->event_entry != &ctx->event_list &&
+ &next_counter->event_entry != &next_ctx->event_list) {
+
+ __perf_counter_sync_stat(counter, next_counter);
+
+ counter = list_next_entry(counter, event_entry);
+		next_counter = list_next_entry(next_counter, event_entry);
+ }
+}
+
/*
* Called from scheduler to remove the counters of the current task,
* with interrupts disabled.
@@ -1061,6 +1140,8 @@ void perf_counter_task_sched_out(struct task_struct *task,
ctx->task = next;
next_ctx->task = task;
do_switch = 0;
+
+ perf_counter_sync_stat(ctx, next_ctx);
}
spin_unlock(&next_ctx->lock);
spin_unlock(&ctx->lock);
@@ -1348,9 +1429,56 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
}
/*
+ * Enable all of a task's counters that have been marked enable-on-exec.
+ * This expects task == current.
+ */
+static void perf_counter_enable_on_exec(struct task_struct *task)
+{
+ struct perf_counter_context *ctx;
+ struct perf_counter *counter;
+ unsigned long flags;
+ int enabled = 0;
+
+ local_irq_save(flags);
+ ctx = task->perf_counter_ctxp;
+ if (!ctx || !ctx->nr_counters)
+ goto out;
+
+ __perf_counter_task_sched_out(ctx);
+
+ spin_lock(&ctx->lock);
+
+ list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ if (!counter->attr.enable_on_exec)
+ continue;
+ counter->attr.enable_on_exec = 0;
+ if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+ continue;
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+ counter->tstamp_enabled =
+ ctx->time - counter->total_time_enabled;
+ enabled = 1;
+ }
+
+ /*
+ * Unclone this context if we enabled any counter.
+ */
+ if (enabled && ctx->parent_ctx) {
+ put_ctx(ctx->parent_ctx);
+ ctx->parent_ctx = NULL;
+ }
+
+ spin_unlock(&ctx->lock);
+
+ perf_counter_task_sched_in(task, smp_processor_id());
+ out:
+ local_irq_restore(flags);
+}
+
+/*
* Cross CPU call to read the hardware counter
*/
-static void __read(void *info)
+static void __perf_counter_read(void *info)
{
struct perf_counter *counter = info;
struct perf_counter_context *ctx = counter->ctx;
@@ -1372,7 +1500,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
*/
if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
smp_call_function_single(counter->oncpu,
- __read, counter, 1);
+ __perf_counter_read, counter, 1);
} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
update_counter_times(counter);
}
@@ -1508,11 +1636,13 @@ static void free_counter(struct perf_counter *counter)
{
perf_pending_sync(counter);
- atomic_dec(&nr_counters);
- if (counter->attr.mmap)
- atomic_dec(&nr_mmap_counters);
- if (counter->attr.comm)
- atomic_dec(&nr_comm_counters);
+ if (!counter->parent) {
+ atomic_dec(&nr_counters);
+ if (counter->attr.mmap)
+ atomic_dec(&nr_mmap_counters);
+ if (counter->attr.comm)
+ atomic_dec(&nr_comm_counters);
+ }
if (counter->destroy)
counter->destroy(counter);
@@ -1751,6 +1881,14 @@ int perf_counter_task_disable(void)
return 0;
}
+static int perf_counter_index(struct perf_counter *counter)
+{
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+ return 0;
+
+ return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
+}
+
/*
* Callers need to ensure there can be no nesting of this function, otherwise
* the seqlock logic goes bad. We can not serialize this because the arch
@@ -1775,11 +1913,17 @@ void perf_counter_update_userpage(struct perf_counter *counter)
preempt_disable();
++userpg->lock;
barrier();
- userpg->index = counter->hw.idx;
+ userpg->index = perf_counter_index(counter);
userpg->offset = atomic64_read(&counter->count);
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
userpg->offset -= atomic64_read(&counter->hw.prev_count);
+ userpg->time_enabled = counter->total_time_enabled +
+ atomic64_read(&counter->child_total_time_enabled);
+
+ userpg->time_running = counter->total_time_running +
+ atomic64_read(&counter->child_total_time_running);
+
barrier();
++userpg->lock;
preempt_enable();
@@ -2483,15 +2627,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
u32 cpu, reserved;
} cpu_entry;
- header.type = 0;
+ header.type = PERF_EVENT_SAMPLE;
header.size = sizeof(header);
- header.misc = PERF_EVENT_MISC_OVERFLOW;
+ header.misc = 0;
header.misc |= perf_misc_flags(data->regs);
if (sample_type & PERF_SAMPLE_IP) {
ip = perf_instruction_pointer(data->regs);
- header.type |= PERF_SAMPLE_IP;
header.size += sizeof(ip);
}
@@ -2500,7 +2643,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
tid_entry.pid = perf_counter_pid(counter, current);
tid_entry.tid = perf_counter_tid(counter, current);
- header.type |= PERF_SAMPLE_TID;
header.size += sizeof(tid_entry);
}
@@ -2510,34 +2652,25 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
*/
time = sched_clock();
- header.type |= PERF_SAMPLE_TIME;
header.size += sizeof(u64);
}
- if (sample_type & PERF_SAMPLE_ADDR) {
- header.type |= PERF_SAMPLE_ADDR;
+ if (sample_type & PERF_SAMPLE_ADDR)
header.size += sizeof(u64);
- }
- if (sample_type & PERF_SAMPLE_ID) {
- header.type |= PERF_SAMPLE_ID;
+ if (sample_type & PERF_SAMPLE_ID)
header.size += sizeof(u64);
- }
if (sample_type & PERF_SAMPLE_CPU) {
- header.type |= PERF_SAMPLE_CPU;
header.size += sizeof(cpu_entry);
cpu_entry.cpu = raw_smp_processor_id();
}
- if (sample_type & PERF_SAMPLE_PERIOD) {
- header.type |= PERF_SAMPLE_PERIOD;
+ if (sample_type & PERF_SAMPLE_PERIOD)
header.size += sizeof(u64);
- }
if (sample_type & PERF_SAMPLE_GROUP) {
- header.type |= PERF_SAMPLE_GROUP;
header.size += sizeof(u64) +
counter->nr_siblings * sizeof(group_entry);
}
@@ -2547,10 +2680,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
if (callchain) {
callchain_size = (1 + callchain->nr) * sizeof(u64);
-
- header.type |= PERF_SAMPLE_CALLCHAIN;
header.size += callchain_size;
- }
+ } else
+ header.size += sizeof(u64);
}
ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
@@ -2601,13 +2733,79 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
}
}
- if (callchain)
- perf_output_copy(&handle, callchain, callchain_size);
+ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+ if (callchain)
+ perf_output_copy(&handle, callchain, callchain_size);
+ else {
+ u64 nr = 0;
+ perf_output_put(&handle, nr);
+ }
+ }
perf_output_end(&handle);
}
/*
+ * read event
+ */
+
+struct perf_read_event {
+ struct perf_event_header header;
+
+ u32 pid;
+ u32 tid;
+ u64 value;
+ u64 format[3];
+};
+
+static void
+perf_counter_read_event(struct perf_counter *counter,
+ struct task_struct *task)
+{
+ struct perf_output_handle handle;
+ struct perf_read_event event = {
+ .header = {
+ .type = PERF_EVENT_READ,
+ .misc = 0,
+ .size = sizeof(event) - sizeof(event.format),
+ },
+ .pid = perf_counter_pid(counter, task),
+ .tid = perf_counter_tid(counter, task),
+ .value = atomic64_read(&counter->count),
+ };
+ int ret, i = 0;
+
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ event.header.size += sizeof(u64);
+ event.format[i++] = counter->total_time_enabled;
+ }
+
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ event.header.size += sizeof(u64);
+ event.format[i++] = counter->total_time_running;
+ }
+
+ if (counter->attr.read_format & PERF_FORMAT_ID) {
+ u64 id;
+
+ event.header.size += sizeof(u64);
+ if (counter->parent)
+ id = counter->parent->id;
+ else
+ id = counter->id;
+
+ event.format[i++] = id;
+ }
+
+ ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+ if (ret)
+ return;
+
+ perf_output_copy(&handle, &event, event.header.size);
+ perf_output_end(&handle);
+}
+
+/*
* fork tracking
*/
@@ -2798,6 +2996,9 @@ void perf_counter_comm(struct task_struct *task)
{
struct perf_comm_event comm_event;
+ if (task->perf_counter_ctxp)
+ perf_counter_enable_on_exec(task);
+
if (!atomic_read(&nr_comm_counters))
return;
@@ -3317,8 +3518,8 @@ out:
put_cpu_var(perf_cpu_context);
}
-void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+void __perf_swcounter_event(u32 event, u64 nr, int nmi,
+ struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data = {
.regs = regs,
@@ -3509,9 +3710,21 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
}
#endif
+atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+
+static void sw_perf_counter_destroy(struct perf_counter *counter)
+{
+ u64 event = counter->attr.config;
+
+ WARN_ON(counter->parent);
+
+ atomic_dec(&perf_swcounter_enabled[event]);
+}
+
static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
const struct pmu *pmu = NULL;
+ u64 event = counter->attr.config;
/*
* Software counters (currently) can't in general distinguish
@@ -3520,7 +3733,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
* to be kernel events, and page faults are never hypervisor
* events.
*/
- switch (counter->attr.config) {
+ switch (event) {
case PERF_COUNT_SW_CPU_CLOCK:
pmu = &perf_ops_cpu_clock;
@@ -3541,6 +3754,10 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
case PERF_COUNT_SW_CONTEXT_SWITCHES:
case PERF_COUNT_SW_CPU_MIGRATIONS:
+ if (!counter->parent) {
+ atomic_inc(&perf_swcounter_enabled[event]);
+ counter->destroy = sw_perf_counter_destroy;
+ }
pmu = &perf_ops_generic;
break;
}
@@ -3556,6 +3773,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
int cpu,
struct perf_counter_context *ctx,
struct perf_counter *group_leader,
+ struct perf_counter *parent_counter,
gfp_t gfpflags)
{
const struct pmu *pmu;
@@ -3591,6 +3809,8 @@ perf_counter_alloc(struct perf_counter_attr *attr,
counter->ctx = ctx;
counter->oncpu = -1;
+ counter->parent = parent_counter;
+
counter->ns = get_pid_ns(current->nsproxy->pid_ns);
counter->id = atomic64_inc_return(&perf_counter_id);
@@ -3648,11 +3868,13 @@ done:
counter->pmu = pmu;
- atomic_inc(&nr_counters);
- if (counter->attr.mmap)
- atomic_inc(&nr_mmap_counters);
- if (counter->attr.comm)
- atomic_inc(&nr_comm_counters);
+ if (!counter->parent) {
+ atomic_inc(&nr_counters);
+ if (counter->attr.mmap)
+ atomic_inc(&nr_mmap_counters);
+ if (counter->attr.comm)
+ atomic_inc(&nr_comm_counters);
+ }
return counter;
}
@@ -3815,7 +4037,7 @@ SYSCALL_DEFINE5(perf_counter_open,
}
counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
- GFP_KERNEL);
+ NULL, GFP_KERNEL);
ret = PTR_ERR(counter);
if (IS_ERR(counter))
goto err_put_context;
@@ -3881,7 +4103,8 @@ inherit_counter(struct perf_counter *parent_counter,
child_counter = perf_counter_alloc(&parent_counter->attr,
parent_counter->cpu, child_ctx,
- group_leader, GFP_KERNEL);
+ group_leader, parent_counter,
+ GFP_KERNEL);
if (IS_ERR(child_counter))
return child_counter;
get_ctx(child_ctx);
@@ -3904,12 +4127,6 @@ inherit_counter(struct perf_counter *parent_counter,
*/
add_counter_to_ctx(child_counter, child_ctx);
- child_counter->parent = parent_counter;
- /*
- * inherit into child's child as well:
- */
- child_counter->attr.inherit = 1;
-
/*
* Get a reference to the parent filp - we will fput it
* when the child counter exits. This is safe to do because
@@ -3953,10 +4170,14 @@ static int inherit_group(struct perf_counter *parent_counter,
}
static void sync_child_counter(struct perf_counter *child_counter,
- struct perf_counter *parent_counter)
+ struct task_struct *child)
{
+ struct perf_counter *parent_counter = child_counter->parent;
u64 child_val;
+ if (child_counter->attr.inherit_stat)
+ perf_counter_read_event(child_counter, child);
+
child_val = atomic64_read(&child_counter->count);
/*
@@ -3985,7 +4206,8 @@ static void sync_child_counter(struct perf_counter *child_counter,
static void
__perf_counter_exit_task(struct perf_counter *child_counter,
- struct perf_counter_context *child_ctx)
+ struct perf_counter_context *child_ctx,
+ struct task_struct *child)
{
struct perf_counter *parent_counter;
@@ -3999,7 +4221,7 @@ __perf_counter_exit_task(struct perf_counter *child_counter,
* counters need to be zapped - but otherwise linger.
*/
if (parent_counter) {
- sync_child_counter(child_counter, parent_counter);
+ sync_child_counter(child_counter, child);
free_counter(child_counter);
}
}
@@ -4061,7 +4283,7 @@ void perf_counter_exit_task(struct task_struct *child)
again:
list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
list_entry)
- __perf_counter_exit_task(child_counter, child_ctx);
+ __perf_counter_exit_task(child_counter, child_ctx, child);
/*
* If the last counter was a group counter, it will have appended all
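
A note on the counter read format this file now relies on: with PERF_FORMAT_TOTAL_TIME_ENABLED, PERF_FORMAT_TOTAL_TIME_RUNNING and PERF_FORMAT_ID set in attr->read_format, a read() on the counter fd returns four u64 values, which is exactly the read_data layout builtin-record.c consumes further down in this series. A minimal, illustrative userspace sketch follows; the scaling heuristic at the end is an assumption for illustration, not part of this patch.

#include <stdint.h>
#include <unistd.h>

typedef uint64_t u64;

/* Layout returned by read() for the read_format bits named above. */
struct counter_read {
        u64 count;              /* aggregated counter value */
        u64 time_enabled;       /* PERF_FORMAT_TOTAL_TIME_ENABLED */
        u64 time_running;       /* PERF_FORMAT_TOTAL_TIME_RUNNING */
        u64 id;                 /* PERF_FORMAT_ID: stable id, also carried by PERF_EVENT_READ records */
};

/* Sketch: scale the raw count for time the counter was enabled but not
 * scheduled in, the way perf-stat-style tools commonly do. */
static u64 scaled_count(int fd)
{
        struct counter_read rd;

        if (read(fd, &rd, sizeof(rd)) != sizeof(rd))
                return 0;
        if (rd.time_running && rd.time_running < rd.time_enabled)
                return rd.count * rd.time_enabled / rd.time_running;
        return rd.count;
}
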
diff --git a/kernel/pid.c b/kernel/pid.c
index 31310b5d3f50..5fa1db48d8b7 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -36,6 +36,7 @@
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
+#include <linux/kmemleak.h>
#define pid_hashfn(nr, ns) \
hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -512,6 +513,12 @@ void __init pidhash_init(void)
pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
if (!pid_hash)
panic("Could not alloc pidhash!\n");
+ /*
+ * pid_hash contains references to allocated struct pid objects and it
+ * must be scanned by kmemleak to avoid false positives.
+ */
+ kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0,
+ GFP_KERNEL);
for (i = 0; i < pidhash_size; i++)
INIT_HLIST_HEAD(&pid_hash[i]);
}
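
The hunk above is an instance of a more general rule: kmemleak does not track bootmem allocations, so a boot-time block that holds the only references to later kmalloc'd objects has to be registered explicitly or those objects show up as false positives. A hedged sketch of the pairing; my_table and my_table_size are hypothetical, and min_count 0 means the registered block itself is never reported as a leak.

/* Sketch only: register a bootmem-allocated table with kmemleak so the
 * pointers stored in it are scanned during memory scans.
 * Assumes <linux/bootmem.h> and <linux/kmemleak.h>. */
static void *my_table;                          /* hypothetical */
static size_t my_table_size = PAGE_SIZE;        /* hypothetical */

static void __init my_table_init(void)
{
        my_table = alloc_bootmem(my_table_size);
        kmemleak_alloc(my_table, my_table_size, 0, GFP_KERNEL);
}
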
diff --git a/kernel/resource.c b/kernel/resource.c
index ac5f3a36923f..78b087221c15 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -787,7 +787,7 @@ static int __init reserve_setup(char *str)
static struct resource reserve[MAXRESERVE];
for (;;) {
- int io_start, io_num;
+ unsigned int io_start, io_num;
int x = reserved;
if (get_option (&str, &io_start) != 2)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4c32b1a1a06e..12327b2bb785 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -359,6 +359,18 @@ config DEBUG_KMEMLEAK
In order to access the kmemleak file, debugfs needs to be
mounted (usually at /sys/kernel/debug).
+config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
+ int "Maximum kmemleak early log entries"
+ depends on DEBUG_KMEMLEAK
+ range 200 2000
+ default 400
+ help
+ Kmemleak must track all the memory allocations to avoid
+ reporting false positives. Since memory may be allocated or
+ freed before kmemleak is initialised, an early log buffer is
+ used to store these actions. If kmemleak reports "early log
+ buffer exceeded", please increase this value.
+
config DEBUG_KMEMLEAK_TEST
tristate "Simple test for the kernel memory leak detector"
depends on DEBUG_KMEMLEAK
diff --git a/mm/dmapool.c b/mm/dmapool.c
index b1f0885dda22..3df063706f53 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -86,10 +86,12 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
unsigned pages = 0;
unsigned blocks = 0;
+ spin_lock_irq(&pool->lock);
list_for_each_entry(page, &pool->page_list, page_list) {
pages++;
blocks += page->in_use;
}
+ spin_unlock_irq(&pool->lock);
/* per-pool info, no real statistics yet */
temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c96f2c8700aa..e766e1da09d2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -48,10 +48,10 @@
* scanned. This list is only modified during a scanning episode when the
* scan_mutex is held. At the end of a scan, the gray_list is always empty.
* Note that the kmemleak_object.use_count is incremented when an object is
- * added to the gray_list and therefore cannot be freed
- * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
- * file together with modifications to the memory scanning parameters
- * including the scan_thread pointer
+ * added to the gray_list and therefore cannot be freed. This mutex also
+ * prevents multiple users of the "kmemleak" debugfs file together with
+ * modifications to the memory scanning parameters including the scan_thread
+ * pointer
*
* The kmemleak_object structures have a use_count incremented or decremented
* using the get_object()/put_object() functions. When the use_count becomes
@@ -105,7 +105,6 @@
#define MAX_TRACE 16 /* stack trace length */
#define REPORTS_NR 50 /* maximum number of reported leaks */
#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
-#define MSECS_SCAN_YIELD 10 /* CPU yielding period */
#define SECS_FIRST_SCAN 60 /* delay before the first scan */
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
@@ -186,19 +185,16 @@ static atomic_t kmemleak_error = ATOMIC_INIT(0);
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;
-/* used for yielding the CPU to other tasks during scanning */
-static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
-static unsigned long jiffies_scan_yield;
+/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
+static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
-static int kmemleak_stack_scan;
-/* mutex protecting the memory scanning */
+static int kmemleak_stack_scan = 1;
+/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
-/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
-static DEFINE_MUTEX(kmemleak_mutex);
/* number of leaks reported (for limitation purposes) */
static int reported_leaks;
@@ -235,7 +231,7 @@ struct early_log {
};
/* early logging buffer and current position */
-static struct early_log early_log[200];
+static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;
static void kmemleak_disable(void);
@@ -279,15 +275,6 @@ static int color_gray(const struct kmemleak_object *object)
}
/*
- * Objects are considered referenced if their color is gray and they have not
- * been deleted.
- */
-static int referenced_object(struct kmemleak_object *object)
-{
- return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
-}
-
-/*
* Objects are considered unreferenced only if their color is white, they have
* not been deleted and have a minimum age to avoid false positives caused by
* pointers temporarily stored in CPU registers.
@@ -295,42 +282,28 @@ static int referenced_object(struct kmemleak_object *object)
static int unreferenced_object(struct kmemleak_object *object)
{
return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
- time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
+ time_before_eq(object->jiffies + jiffies_min_age,
+ jiffies_last_scan);
}
/*
- * Printing of the (un)referenced objects information, either to the seq file
- * or to the kernel log. The print_referenced/print_unreferenced functions
- * must be called with the object->lock held.
+ * Printing of the unreferenced objects information to the seq file. The
+ * print_unreferenced function must be called with the object->lock held.
*/
-#define print_helper(seq, x...) do { \
- struct seq_file *s = (seq); \
- if (s) \
- seq_printf(s, x); \
- else \
- pr_info(x); \
-} while (0)
-
-static void print_referenced(struct kmemleak_object *object)
-{
- pr_info("referenced object 0x%08lx (size %zu)\n",
- object->pointer, object->size);
-}
-
static void print_unreferenced(struct seq_file *seq,
struct kmemleak_object *object)
{
int i;
- print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n",
- object->pointer, object->size);
- print_helper(seq, " comm \"%s\", pid %d, jiffies %lu\n",
- object->comm, object->pid, object->jiffies);
- print_helper(seq, " backtrace:\n");
+ seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
+ object->pointer, object->size);
+ seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
+ object->comm, object->pid, object->jiffies);
+ seq_printf(seq, " backtrace:\n");
for (i = 0; i < object->trace_len; i++) {
void *ptr = (void *)object->trace[i];
- print_helper(seq, " [<%p>] %pS\n", ptr, ptr);
+ seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
}
}
@@ -554,8 +527,10 @@ static void delete_object(unsigned long ptr)
write_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, 0);
if (!object) {
+#ifdef DEBUG
kmemleak_warn("Freeing unknown object at 0x%08lx\n",
ptr);
+#endif
write_unlock_irqrestore(&kmemleak_lock, flags);
return;
}
@@ -571,8 +546,6 @@ static void delete_object(unsigned long ptr)
* cannot be freed when it is being scanned.
*/
spin_lock_irqsave(&object->lock, flags);
- if (object->flags & OBJECT_REPORTED)
- print_referenced(object);
object->flags &= ~OBJECT_ALLOCATED;
spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
@@ -696,7 +669,8 @@ static void log_early(int op_type, const void *ptr, size_t size,
struct early_log *log;
if (crt_early_log >= ARRAY_SIZE(early_log)) {
- kmemleak_stop("Early log buffer exceeded\n");
+ pr_warning("Early log buffer exceeded\n");
+ kmemleak_disable();
return;
}
@@ -808,21 +782,6 @@ void kmemleak_no_scan(const void *ptr)
EXPORT_SYMBOL(kmemleak_no_scan);
/*
- * Yield the CPU so that other tasks get a chance to run. The yielding is
- * rate-limited to avoid excessive number of calls to the schedule() function
- * during memory scanning.
- */
-static void scan_yield(void)
-{
- might_sleep();
-
- if (time_is_before_eq_jiffies(next_scan_yield)) {
- schedule();
- next_scan_yield = jiffies + jiffies_scan_yield;
- }
-}
-
-/*
* Memory scanning is a long process and it needs to be interruptible. This
* function checks whether such an interrupt condition has occurred.
*/
@@ -862,15 +821,6 @@ static void scan_block(void *_start, void *_end,
if (scan_should_stop())
break;
- /*
- * When scanning a memory block with a corresponding
- * kmemleak_object, the CPU yielding is handled in the calling
- * code since it holds the object->lock to avoid the block
- * freeing.
- */
- if (!scanned)
- scan_yield();
-
object = find_and_get_object(pointer, 1);
if (!object)
continue;
@@ -952,6 +902,9 @@ static void kmemleak_scan(void)
struct kmemleak_object *object, *tmp;
struct task_struct *task;
int i;
+ int new_leaks = 0;
+
+ jiffies_last_scan = jiffies;
/* prepare the kmemleak_object's */
rcu_read_lock();
@@ -1033,7 +986,7 @@ static void kmemleak_scan(void)
*/
object = list_entry(gray_list.next, typeof(*object), gray_list);
while (&object->gray_list != &gray_list) {
- scan_yield();
+ cond_resched();
/* may add new objects to the list */
if (!scan_should_stop())
@@ -1049,6 +1002,32 @@ static void kmemleak_scan(void)
object = tmp;
}
WARN_ON(!list_empty(&gray_list));
+
+ /*
+ * If scanning was stopped, do not report any new unreferenced objects.
+ */
+ if (scan_should_stop())
+ return;
+
+ /*
+ * Scanning result reporting.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+ spin_lock_irqsave(&object->lock, flags);
+ if (unreferenced_object(object) &&
+ !(object->flags & OBJECT_REPORTED)) {
+ object->flags |= OBJECT_REPORTED;
+ new_leaks++;
+ }
+ spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+
+ if (new_leaks)
+ pr_info("%d new suspected memory leaks (see "
+ "/sys/kernel/debug/kmemleak)\n", new_leaks);
+
}
/*
@@ -1070,36 +1049,12 @@ static int kmemleak_scan_thread(void *arg)
}
while (!kthread_should_stop()) {
- struct kmemleak_object *object;
signed long timeout = jiffies_scan_wait;
mutex_lock(&scan_mutex);
-
kmemleak_scan();
- reported_leaks = 0;
-
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list) {
- unsigned long flags;
-
- if (reported_leaks >= REPORTS_NR)
- break;
- spin_lock_irqsave(&object->lock, flags);
- if (!(object->flags & OBJECT_REPORTED) &&
- unreferenced_object(object)) {
- print_unreferenced(NULL, object);
- object->flags |= OBJECT_REPORTED;
- reported_leaks++;
- } else if ((object->flags & OBJECT_REPORTED) &&
- referenced_object(object)) {
- print_referenced(object);
- object->flags &= ~OBJECT_REPORTED;
- }
- spin_unlock_irqrestore(&object->lock, flags);
- }
- rcu_read_unlock();
-
mutex_unlock(&scan_mutex);
+
/* wait before the next scan */
while (timeout && !kthread_should_stop())
timeout = schedule_timeout_interruptible(timeout);
@@ -1112,7 +1067,7 @@ static int kmemleak_scan_thread(void *arg)
/*
* Start the automatic memory scanning thread. This function must be called
- * with the kmemleak_mutex held.
+ * with the scan_mutex held.
*/
void start_scan_thread(void)
{
@@ -1127,7 +1082,7 @@ void start_scan_thread(void)
/*
* Stop the automatic memory scanning thread. This function must be called
- * with the kmemleak_mutex held.
+ * with the scan_mutex held.
*/
void stop_scan_thread(void)
{
@@ -1147,10 +1102,8 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
struct kmemleak_object *object;
loff_t n = *pos;
- if (!n) {
- kmemleak_scan();
+ if (!n)
reported_leaks = 0;
- }
if (reported_leaks >= REPORTS_NR)
return NULL;
@@ -1211,11 +1164,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
unsigned long flags;
spin_lock_irqsave(&object->lock, flags);
- if (!unreferenced_object(object))
- goto out;
- print_unreferenced(seq, object);
- reported_leaks++;
-out:
+ if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
+ print_unreferenced(seq, object);
+ reported_leaks++;
+ }
spin_unlock_irqrestore(&object->lock, flags);
return 0;
}
@@ -1234,13 +1186,10 @@ static int kmemleak_open(struct inode *inode, struct file *file)
if (!atomic_read(&kmemleak_enabled))
return -EBUSY;
- ret = mutex_lock_interruptible(&kmemleak_mutex);
+ ret = mutex_lock_interruptible(&scan_mutex);
if (ret < 0)
goto out;
if (file->f_mode & FMODE_READ) {
- ret = mutex_lock_interruptible(&scan_mutex);
- if (ret < 0)
- goto kmemleak_unlock;
ret = seq_open(file, &kmemleak_seq_ops);
if (ret < 0)
goto scan_unlock;
@@ -1249,8 +1198,6 @@ static int kmemleak_open(struct inode *inode, struct file *file)
scan_unlock:
mutex_unlock(&scan_mutex);
-kmemleak_unlock:
- mutex_unlock(&kmemleak_mutex);
out:
return ret;
}
@@ -1259,11 +1206,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
{
int ret = 0;
- if (file->f_mode & FMODE_READ) {
+ if (file->f_mode & FMODE_READ)
seq_release(inode, file);
- mutex_unlock(&scan_mutex);
- }
- mutex_unlock(&kmemleak_mutex);
+ mutex_unlock(&scan_mutex);
return ret;
}
@@ -1278,6 +1223,7 @@ static int kmemleak_release(struct inode *inode, struct file *file)
* scan=off - stop the automatic memory scanning thread
* scan=... - set the automatic memory scanning period in seconds (0 to
* disable it)
+ * scan - trigger a memory scan
*/
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *ppos)
@@ -1315,7 +1261,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
start_scan_thread();
}
- } else
+ } else if (strncmp(buf, "scan", 4) == 0)
+ kmemleak_scan();
+ else
return -EINVAL;
/* ignore the rest of the buffer, only one command at a time */
@@ -1340,11 +1288,9 @@ static int kmemleak_cleanup_thread(void *arg)
{
struct kmemleak_object *object;
- mutex_lock(&kmemleak_mutex);
+ mutex_lock(&scan_mutex);
stop_scan_thread();
- mutex_unlock(&kmemleak_mutex);
- mutex_lock(&scan_mutex);
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list)
delete_object(object->pointer);
@@ -1411,7 +1357,6 @@ void __init kmemleak_init(void)
int i;
unsigned long flags;
- jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
@@ -1486,9 +1431,9 @@ static int __init kmemleak_late_init(void)
&kmemleak_fops);
if (!dentry)
pr_warning("Failed to create the debugfs kmemleak file\n");
- mutex_lock(&kmemleak_mutex);
+ mutex_lock(&scan_mutex);
start_scan_thread();
- mutex_unlock(&kmemleak_mutex);
+ mutex_unlock(&scan_mutex);
pr_info("Kernel memory leak detector initialized\n");
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7b0dcea4935b..7687879253b9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -541,8 +541,11 @@ static void balance_dirty_pages(struct address_space *mapping)
* filesystems (i.e. NFS) in which data may have been
* written to the server's write cache, but has not yet
* been flushed to permanent storage.
+ * Only move pages to writeback if this bdi is over its
+ * threshold, otherwise wait until the disk writes catch
+ * up.
*/
- if (bdi_nr_reclaimable) {
+ if (bdi_nr_reclaimable > bdi_thresh) {
writeback_inodes(&wbc);
pages_written += write_chunk - wbc.nr_to_write;
get_dirty_limits(&background_thresh, &dirty_thresh,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5d714f8fb303..e0f2cdf9d8b1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4032,6 +4032,8 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
int i, nid;
unsigned long usable_startpfn;
unsigned long kernelcore_node, kernelcore_remaining;
+ /* save the state before borrowing the nodemask */
+ nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
unsigned long totalpages = early_calculate_totalpages();
int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
@@ -4059,7 +4061,7 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
/* If kernelcore was not specified, there is no ZONE_MOVABLE */
if (!required_kernelcore)
- return;
+ goto out;
/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
find_usable_zone_for_movable();
@@ -4158,6 +4160,10 @@ restart:
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+
+out:
+ /* restore the node_state */
+ node_states[N_HIGH_MEMORY] = saved_node_state;
}
/* Any regular memory on that node ? */
@@ -4242,11 +4248,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
early_node_map[i].start_pfn,
early_node_map[i].end_pfn);
- /*
- * find_zone_movable_pfns_for_nodes/early_calculate_totalpages init
- * that node_mask, clear it at first
- */
- nodes_clear(node_states[N_HIGH_MEMORY]);
/* Initialise every node */
mminit_verify_pageflags_layout();
setup_nr_node_ids();
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 105ad10876af..27eda9fdf3c2 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -276,6 +276,9 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
else
return NULL;
+ if (!dev)
+ return NULL;
+
if (dev->type != ARPHRD_IEEE802154) {
dev_put(dev);
return NULL;
@@ -521,3 +524,6 @@ static void __exit ieee802154_nl_exit(void)
}
module_exit(ieee802154_nl_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ieee 802.15.4 configuration interface");
+
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 8a3881e28aca..c29d75d8f1b1 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -801,11 +801,8 @@ static int arp_process(struct sk_buff *skb)
* cache.
*/
- /*
- * Special case: IPv4 duplicate address detection packet (RFC2131)
- * and Gratuitous ARP/ARP Announce. (RFC3927, Section 2.4)
- */
- if (sip == 0 || tip == sip) {
+ /* Special case: IPv4 duplicate address detection packet (RFC2131) */
+ if (sip == 0) {
if (arp->ar_op == htons(ARPOP_REQUEST) &&
inet_addr_type(net, tip) == RTN_LOCAL &&
!arp_ignore(in_dev, sip, tip))
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 012cf5a68581..00a54b246dfe 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1021,6 +1021,9 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
(struct node *)tn, wasfull);
tp = node_parent((struct node *) tn);
+ if (!tp)
+ rcu_assign_pointer(t->trie, (struct node *)tn);
+
tnode_free_flush();
if (!tp)
break;
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 155c008626c8..09172a65d9b6 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -191,7 +191,8 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
ct, ctinfo);
/* Tell TCP window tracking about seq change */
nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
- ct, CTINFO2DIR(ctinfo));
+ ct, CTINFO2DIR(ctinfo),
+ (int)rep_len - (int)match_len);
nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
}
@@ -377,6 +378,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
struct tcphdr *tcph;
int dir;
__be32 newseq, newack;
+ s16 seqoff, ackoff;
struct nf_conn_nat *nat = nfct_nat(ct);
struct nf_nat_seq *this_way, *other_way;
@@ -390,15 +392,18 @@ nf_nat_seq_adjust(struct sk_buff *skb,
tcph = (void *)skb->data + ip_hdrlen(skb);
if (after(ntohl(tcph->seq), this_way->correction_pos))
- newseq = htonl(ntohl(tcph->seq) + this_way->offset_after);
+ seqoff = this_way->offset_after;
else
- newseq = htonl(ntohl(tcph->seq) + this_way->offset_before);
+ seqoff = this_way->offset_before;
if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
other_way->correction_pos))
- newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after);
+ ackoff = other_way->offset_after;
else
- newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before);
+ ackoff = other_way->offset_before;
+
+ newseq = htonl(ntohl(tcph->seq) + seqoff);
+ newack = htonl(ntohl(tcph->ack_seq) - ackoff);
inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
@@ -413,7 +418,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo))
return 0;
- nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir);
+ nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff);
return 1;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 17b89c523f9d..7870a535dac6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -903,13 +903,17 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
iov++;
while (seglen > 0) {
- int copy;
+ int copy = 0;
+ int max = size_goal;
skb = tcp_write_queue_tail(sk);
+ if (tcp_send_head(sk)) {
+ if (skb->ip_summed == CHECKSUM_NONE)
+ max = mss_now;
+ copy = max - skb->len;
+ }
- if (!tcp_send_head(sk) ||
- (copy = size_goal - skb->len) <= 0) {
-
+ if (copy <= 0) {
new_segment:
/* Allocate new segment. If the interface is SG,
* allocate skb fitting to single page.
@@ -930,6 +934,7 @@ new_segment:
skb_entail(sk, skb);
copy = size_goal;
+ max = size_goal;
}
/* Try to append data to the end of skb. */
@@ -1028,7 +1033,7 @@ new_segment:
if ((seglen -= copy) == 0 && iovlen == 0)
goto out;
- if (skb->len < size_goal || (flags & MSG_OOB))
+ if (skb->len < max || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 416fc4c2e7eb..5bdf08d312d9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -725,7 +725,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
- if (skb->len <= mss_now || !sk_can_gso(sk)) {
+ if (skb->len <= mss_now || !sk_can_gso(sk) ||
+ skb->ip_summed == CHECKSUM_NONE) {
/* Avoid the costly divide in the normal
* non-TSO case.
*/
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index afde8f991646..2032dfe25ca8 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -617,8 +617,10 @@ err1:
void nf_conntrack_expect_fini(struct net *net)
{
exp_proc_remove(net);
- if (net_eq(net, &init_net))
+ if (net_eq(net, &init_net)) {
+ rcu_barrier(); /* Wait for call_rcu() before destroy */
kmem_cache_destroy(nf_ct_expect_cachep);
+ }
nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
nf_ct_expect_hsize);
}
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 4b2c769d555f..fef95be334bd 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -186,6 +186,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
update_alloc_size(type);
mutex_unlock(&nf_ct_ext_type_mutex);
- synchronize_rcu();
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
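
The synchronize_rcu() to rcu_barrier() switches in the two conntrack files matter because these paths free objects via call_rcu(): synchronize_rcu() only waits for a grace period, while rcu_barrier() waits for the queued callbacks themselves to run, which must happen before a backing kmem_cache can be destroyed. A generic sketch of the pattern; struct foo and foo_cachep are hypothetical names.

/* Sketch of the call_rcu() + rcu_barrier() teardown pattern. */
struct foo {
        struct rcu_head rcu;
        /* ... payload ... */
};

static struct kmem_cache *foo_cachep;   /* hypothetical cache */

static void foo_free_rcu(struct rcu_head *head)
{
        kmem_cache_free(foo_cachep, container_of(head, struct foo, rcu));
}

static void foo_release(struct foo *f)
{
        call_rcu(&f->rcu, foo_free_rcu);        /* deferred free */
}

static void foo_exit(void)
{
        /*
         * Wait for all pending foo_free_rcu() callbacks to complete;
         * synchronize_rcu() would only wait for readers and could leave
         * callbacks still queued against the cache being destroyed.
         */
        rcu_barrier();
        kmem_cache_destroy(foo_cachep);
}
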
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 33fc0a443f3d..97a82ba75376 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -720,8 +720,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
/* Caller must linearize skb at tcp header. */
void nf_conntrack_tcp_update(const struct sk_buff *skb,
unsigned int dataoff,
- struct nf_conn *ct,
- int dir)
+ struct nf_conn *ct, int dir,
+ s16 offset)
{
const struct tcphdr *tcph = (const void *)skb->data + dataoff;
const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir];
@@ -734,7 +734,7 @@ void nf_conntrack_tcp_update(const struct sk_buff *skb,
/*
* We have to worry for the ack in the reply packet only...
*/
- if (after(end, ct->proto.tcp.seen[dir].td_end))
+ if (ct->proto.tcp.seen[dir].td_end + offset == end)
ct->proto.tcp.seen[dir].td_end = end;
ct->proto.tcp.last_end = end;
spin_unlock_bh(&ct->lock);
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 0b7139f3dd78..fc581800698e 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -129,7 +129,7 @@ conntrack_addrcmp(const union nf_inet_addr *kaddr,
static inline bool
conntrack_mt_origsrc(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
@@ -138,7 +138,7 @@ conntrack_mt_origsrc(const struct nf_conn *ct,
static inline bool
conntrack_mt_origdst(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3,
@@ -147,7 +147,7 @@ conntrack_mt_origdst(const struct nf_conn *ct,
static inline bool
conntrack_mt_replsrc(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
@@ -156,7 +156,7 @@ conntrack_mt_replsrc(const struct nf_conn *ct,
static inline bool
conntrack_mt_repldst(const struct nf_conn *ct,
- const struct xt_conntrack_mtinfo1 *info,
+ const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
@@ -164,7 +164,7 @@ conntrack_mt_repldst(const struct nf_conn *ct,
}
static inline bool
-ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info,
+ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
const struct nf_conn *ct)
{
const struct nf_conntrack_tuple *tuple;
@@ -204,7 +204,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo1 *info,
static bool
conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
- const struct xt_conntrack_mtinfo1 *info = par->matchinfo;
+ const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int statebit;
@@ -278,6 +278,16 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
+static bool
+conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par)
+{
+ const struct xt_conntrack_mtinfo2 *const *info = par->matchinfo;
+ struct xt_match_param newpar = *par;
+
+ newpar.matchinfo = *info;
+ return conntrack_mt(skb, &newpar);
+}
+
static bool conntrack_mt_check(const struct xt_mtchk_param *par)
{
if (nf_ct_l3proto_try_module_get(par->family) < 0) {
@@ -288,11 +298,45 @@ static bool conntrack_mt_check(const struct xt_mtchk_param *par)
return true;
}
+static bool conntrack_mt_check_v1(const struct xt_mtchk_param *par)
+{
+ struct xt_conntrack_mtinfo1 *info = par->matchinfo;
+ struct xt_conntrack_mtinfo2 *up;
+ int ret = conntrack_mt_check(par);
+
+ if (ret < 0)
+ return ret;
+
+ up = kmalloc(sizeof(*up), GFP_KERNEL);
+ if (up == NULL) {
+ nf_ct_l3proto_module_put(par->family);
+ return -ENOMEM;
+ }
+
+ /*
+ * The strategy here is to minimize the overhead of v1 matching,
+ * by prebuilding a v2 struct and putting the pointer into the
+ * v1 dataspace.
+ */
+ memcpy(up, info, offsetof(typeof(*info), state_mask));
+ up->state_mask = info->state_mask;
+ up->status_mask = info->status_mask;
+ *(void **)info = up;
+ return true;
+}
+
static void conntrack_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_l3proto_module_put(par->family);
}
+static void conntrack_mt_destroy_v1(const struct xt_mtdtor_param *par)
+{
+ struct xt_conntrack_mtinfo2 **info = par->matchinfo;
+ kfree(*info);
+ conntrack_mt_destroy(par);
+}
+
#ifdef CONFIG_COMPAT
struct compat_xt_conntrack_info
{
@@ -363,6 +407,16 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = {
.revision = 1,
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo1),
+ .match = conntrack_mt_v1,
+ .checkentry = conntrack_mt_check_v1,
+ .destroy = conntrack_mt_destroy_v1,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "conntrack",
+ .revision = 2,
+ .family = NFPROTO_UNSPEC,
+ .matchsize = sizeof(struct xt_conntrack_mtinfo2),
.match = conntrack_mt,
.checkentry = conntrack_mt_check,
.destroy = conntrack_mt_destroy,
diff --git a/net/sctp/output.c b/net/sctp/output.c
index b76411444515..b94c21190566 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -407,7 +407,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
}
dst = dst_clone(tp->dst);
skb_dst_set(nskb, dst);
- if (dst)
+ if (!dst)
goto no_route;
/* Build the SCTP header. */
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5f1f86565f16..f2f7c638083e 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -668,22 +668,10 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) {
if (x->props.family != family ||
x->id.spi != spi ||
- x->id.proto != proto)
+ x->id.proto != proto ||
+ xfrm_addr_cmp(&x->id.daddr, daddr, family))
continue;
- switch (family) {
- case AF_INET:
- if (x->id.daddr.a4 != daddr->a4)
- continue;
- break;
- case AF_INET6:
- if (!ipv6_addr_equal((struct in6_addr *)daddr,
- (struct in6_addr *)
- x->id.daddr.a6))
- continue;
- break;
- }
-
xfrm_state_hold(x);
return x;
}
@@ -699,26 +687,11 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_addre
hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) {
if (x->props.family != family ||
- x->id.proto != proto)
+ x->id.proto != proto ||
+ xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
+ xfrm_addr_cmp(&x->props.saddr, saddr, family))
continue;
- switch (family) {
- case AF_INET:
- if (x->id.daddr.a4 != daddr->a4 ||
- x->props.saddr.a4 != saddr->a4)
- continue;
- break;
- case AF_INET6:
- if (!ipv6_addr_equal((struct in6_addr *)daddr,
- (struct in6_addr *)
- x->id.daddr.a6) ||
- !ipv6_addr_equal((struct in6_addr *)saddr,
- (struct in6_addr *)
- x->props.saddr.a6))
- continue;
- break;
- }
-
xfrm_state_hold(x);
return x;
}
@@ -1001,25 +974,11 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
x->props.family != family ||
x->km.state != XFRM_STATE_ACQ ||
x->id.spi != 0 ||
- x->id.proto != proto)
+ x->id.proto != proto ||
+ xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
+ xfrm_addr_cmp(&x->props.saddr, saddr, family))
continue;
- switch (family) {
- case AF_INET:
- if (x->id.daddr.a4 != daddr->a4 ||
- x->props.saddr.a4 != saddr->a4)
- continue;
- break;
- case AF_INET6:
- if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
- (struct in6_addr *)daddr) ||
- !ipv6_addr_equal((struct in6_addr *)
- x->props.saddr.a6,
- (struct in6_addr *)saddr))
- continue;
- break;
- }
-
xfrm_state_hold(x);
return x;
}
diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
index 64f5ddb09ea6..5c113123ed9f 100644
--- a/scripts/pnmtologo.c
+++ b/scripts/pnmtologo.c
@@ -237,7 +237,7 @@ static void write_header(void)
fprintf(out, " * Linux logo %s\n", logoname);
fputs(" */\n\n", out);
fputs("#include <linux/linux_logo.h>\n\n", out);
- fprintf(out, "static const unsigned char %s_data[] __initconst = {\n",
+ fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
logoname);
}
@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
fputs("\n};\n\n", out);
/* write logo clut */
- fprintf(out, "static const unsigned char %s_clut[] __initconst = {\n",
+ fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
logoname);
write_hex_cnt = 0;
for (i = 0; i < logo_clutsize; i++) {
diff --git a/tools/perf/CREDITS b/tools/perf/CREDITS
new file mode 100644
index 000000000000..c2ddcb3acbd0
--- /dev/null
+++ b/tools/perf/CREDITS
@@ -0,0 +1,30 @@
+Most of the infrastructure that 'perf' uses here has been reused
+from the Git project, as of version:
+
+ 66996ec: Sync with 1.6.2.4
+
+Here is an (incomplete!) list of main contributors to those files
+in util/* and elsewhere:
+
+ Alex Riesen
+ Christian Couder
+ Dmitry Potapov
+ Jeff King
+ Johannes Schindelin
+ Johannes Sixt
+ Junio C Hamano
+ Linus Torvalds
+ Matthias Kestenholz
+ Michal Ostrowski
+ Miklos Vajna
+ Petr Baudis
+ Pierre Habouzit
+ René Scharfe
+ Samuel Tardieu
+ Shawn O. Pearce
+ Steffen Prohaska
+ Steve Haslam
+
+Thanks guys!
+
+The full history of the files can be found in the upstream Git commits.
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 52d3fc6846a9..8aa3f8c88707 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -13,13 +13,25 @@ SYNOPSIS
DESCRIPTION
-----------
This command displays the performance counter profile information recorded
-via perf report.
+via perf record.
OPTIONS
-------
-i::
--input=::
Input file name. (default: perf.data)
+-d::
+--dsos=::
+ Only consider symbols in these dsos. Comma-separated list
+ that also accepts file://filename entries.
+-C::
+--comms=::
+ Only consider symbols in these comms. Comma-separated list
+ that also accepts file://filename entries.
+-S::
+--symbols=::
+ Only consider these symbols. Comma-separated list
+ that also accepts file://filename entries.
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index c368a72721d7..0d74346d21ab 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -8,8 +8,8 @@ perf-stat - Run a command and gather performance counter statistics
SYNOPSIS
--------
[verse]
-'perf stat' [-e <EVENT> | --event=EVENT] [-l] [-a] <command>
-'perf stat' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>]
+'perf stat' [-e <EVENT> | --event=EVENT] [-S] [-a] <command>
+'perf stat' [-e <EVENT> | --event=EVENT] [-S] [-a] -- <command> [<options>]
DESCRIPTION
-----------
@@ -40,7 +40,7 @@ OPTIONS
-a::
system-wide collection
--l::
+-S::
scale counter values
EXAMPLES
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 36d7eef49913..9c6d0ae3708e 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -290,7 +290,7 @@ LIB_FILE=libperf.a
LIB_H += ../../include/linux/perf_counter.h
LIB_H += perf.h
-LIB_H += types.h
+LIB_H += util/types.h
LIB_H += util/list.h
LIB_H += util/rbtree.h
LIB_H += util/levenshtein.h
@@ -301,6 +301,7 @@ LIB_H += util/util.h
LIB_H += util/help.h
LIB_H += util/strbuf.h
LIB_H += util/string.h
+LIB_H += util/strlist.h
LIB_H += util/run-command.h
LIB_H += util/sigchain.h
LIB_H += util/symbol.h
@@ -322,12 +323,15 @@ LIB_OBJS += util/run-command.o
LIB_OBJS += util/quote.o
LIB_OBJS += util/strbuf.o
LIB_OBJS += util/string.o
+LIB_OBJS += util/strlist.o
LIB_OBJS += util/usage.o
LIB_OBJS += util/wrapper.o
LIB_OBJS += util/sigchain.o
LIB_OBJS += util/symbol.o
LIB_OBJS += util/color.o
LIB_OBJS += util/pager.o
+LIB_OBJS += util/header.o
+LIB_OBJS += util/callchain.o
BUILTIN_OBJS += builtin-annotate.o
BUILTIN_OBJS += builtin-help.o
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 7e58e3ad1508..722c0f54e549 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -855,7 +855,7 @@ static unsigned long total = 0,
total_unknown = 0;
static int
-process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
+process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
char level;
int show = 0;
@@ -1013,10 +1013,10 @@ process_period_event(event_t *event, unsigned long offset, unsigned long head)
static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
- if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
- return process_overflow_event(event, offset, head);
-
switch (event->header.type) {
+ case PERF_EVENT_SAMPLE:
+ return process_sample_event(event, offset, head);
+
case PERF_EVENT_MMAP:
return process_mmap_event(event, offset, head);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index d7ebbd757543..d18546f37d7c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -14,6 +14,8 @@
#include "util/parse-events.h"
#include "util/string.h"
+#include "util/header.h"
+
#include <unistd.h>
#include <sched.h>
@@ -39,6 +41,8 @@ static int force = 0;
static int append_file = 0;
static int call_graph = 0;
static int verbose = 0;
+static int inherit_stat = 0;
+static int no_samples = 0;
static long samples;
static struct timeval last_read;
@@ -52,7 +56,8 @@ static int nr_poll;
static int nr_cpu;
static int file_new = 1;
-static struct perf_file_header file_header;
+
+struct perf_header *header;
struct mmap_event {
struct perf_event_header header;
@@ -306,12 +311,11 @@ static void pid_synthesize_mmap_samples(pid_t pid)
continue;
pbf += n + 3;
if (*pbf == 'x') { /* vm_exec */
- char *execname = strrchr(bf, ' ');
+ char *execname = strchr(bf, '/');
- if (execname == NULL || execname[1] != '/')
+ if (execname == NULL)
continue;
- execname += 1;
size = strlen(execname);
execname[size - 1] = '\0'; /* Remove \n */
memcpy(mmap_ev.filename, execname, size);
@@ -329,7 +333,7 @@ static void pid_synthesize_mmap_samples(pid_t pid)
fclose(fp);
}
-static void synthesize_samples(void)
+static void synthesize_all(void)
{
DIR *proc;
struct dirent dirent, *next;
@@ -353,10 +357,35 @@ static void synthesize_samples(void)
static int group_fd;
+static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int nr)
+{
+ struct perf_header_attr *h_attr;
+
+ if (nr < header->attrs) {
+ h_attr = header->attr[nr];
+ } else {
+ h_attr = perf_header_attr__new(a);
+ perf_header__add_attr(header, h_attr);
+ }
+
+ return h_attr;
+}
+
static void create_counter(int counter, int cpu, pid_t pid)
{
struct perf_counter_attr *attr = attrs + counter;
- int track = 1;
+ struct perf_header_attr *h_attr;
+ int track = !counter; /* only the first counter needs these */
+ struct {
+ u64 count;
+ u64 time_enabled;
+ u64 time_running;
+ u64 id;
+ } read_data;
+
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING |
+ PERF_FORMAT_ID;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
@@ -366,25 +395,20 @@ static void create_counter(int counter, int cpu, pid_t pid)
attr->sample_freq = freq;
}
+ if (no_samples)
+ attr->sample_freq = 0;
+
+ if (inherit_stat)
+ attr->inherit_stat = 1;
+
if (call_graph)
attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
- if (file_new) {
- file_header.sample_type = attr->sample_type;
- } else {
- if (file_header.sample_type != attr->sample_type) {
- fprintf(stderr, "incompatible append\n");
- exit(-1);
- }
- }
-
attr->mmap = track;
attr->comm = track;
attr->inherit = (cpu < 0) && inherit;
attr->disabled = 1;
- track = 0; /* only the first counter needs these */
-
try_again:
fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0);
@@ -415,6 +439,22 @@ try_again:
exit(-1);
}
+ h_attr = get_header_attr(attr, counter);
+
+ if (!file_new) {
+ if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
+ fprintf(stderr, "incompatible append\n");
+ exit(-1);
+ }
+ }
+
+ if (read(fd[nr_cpu][counter], &read_data, sizeof(read_data)) == -1) {
+ perror("Unable to read perf file descriptor\n");
+ exit(-1);
+ }
+
+ perf_header_attr__add_id(h_attr, read_data.id);
+
assert(fd[nr_cpu][counter] >= 0);
fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK);
@@ -445,11 +485,6 @@ static void open_counters(int cpu, pid_t pid)
{
int counter;
- if (pid > 0) {
- pid_synthesize_comm_event(pid, 0);
- pid_synthesize_mmap_samples(pid);
- }
-
group_fd = -1;
for (counter = 0; counter < nr_counters; counter++)
create_counter(counter, cpu, pid);
@@ -459,17 +494,16 @@ static void open_counters(int cpu, pid_t pid)
static void atexit_header(void)
{
- file_header.data_size += bytes_written;
+ header->data_size += bytes_written;
- if (pwrite(output, &file_header, sizeof(file_header), 0) == -1)
- perror("failed to write on file headers");
+ perf_header__write(header, output);
}
static int __cmd_record(int argc, const char **argv)
{
int i, counter;
struct stat st;
- pid_t pid;
+ pid_t pid = 0;
int flags;
int ret;
@@ -500,22 +534,31 @@ static int __cmd_record(int argc, const char **argv)
exit(-1);
}
- if (!file_new) {
- if (read(output, &file_header, sizeof(file_header)) == -1) {
- perror("failed to read file headers");
- exit(-1);
- }
-
- lseek(output, file_header.data_size, SEEK_CUR);
- }
+ if (!file_new)
+ header = perf_header__read(output);
+ else
+ header = perf_header__new();
atexit(atexit_header);
if (!system_wide) {
- open_counters(-1, target_pid != -1 ? target_pid : getpid());
+ pid = target_pid;
+ if (pid == -1)
+ pid = getpid();
+
+ open_counters(-1, pid);
} else for (i = 0; i < nr_cpus; i++)
open_counters(i, target_pid);
+ if (file_new)
+ perf_header__write(header, output);
+
+ if (!system_wide) {
+ pid_synthesize_comm_event(pid, 0);
+ pid_synthesize_mmap_samples(pid);
+ } else
+ synthesize_all();
+
if (target_pid == -1 && argc) {
pid = fork();
if (pid < 0)
@@ -539,10 +582,7 @@ static int __cmd_record(int argc, const char **argv)
}
}
- if (system_wide)
- synthesize_samples();
-
- while (!done) {
+ for (;;) {
int hits = samples;
for (i = 0; i < nr_cpu; i++) {
@@ -550,8 +590,11 @@ static int __cmd_record(int argc, const char **argv)
mmap_read(&mmap_array[i][counter]);
}
- if (hits == samples)
+ if (hits == samples) {
+ if (done)
+ break;
ret = poll(event_array, nr_poll, 100);
+ }
}
/*
@@ -600,6 +643,10 @@ static const struct option options[] = {
"do call-graph (stack chain/backtrace) recording"),
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
+ OPT_BOOLEAN('s', "stat", &inherit_stat,
+ "per thread counts"),
+ OPT_BOOLEAN('n', "no-samples", &no_samples,
+ "don't sample"),
OPT_END()
};
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 5eb5566f0c95..135b7837e6bf 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -15,8 +15,11 @@
#include "util/rbtree.h"
#include "util/symbol.h"
#include "util/string.h"
+#include "util/callchain.h"
+#include "util/strlist.h"
#include "perf.h"
+#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
@@ -30,6 +33,8 @@ static char *vmlinux = NULL;
static char default_sort_order[] = "comm,dso";
static char *sort_order = default_sort_order;
+static char *dso_list_str, *comm_list_str, *sym_list_str;
+static struct strlist *dso_list, *comm_list, *sym_list;
static int input;
static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
@@ -51,6 +56,9 @@ static char *parent_pattern = default_parent_pattern;
static regex_t parent_regex;
static int exclude_other = 1;
+static int callchain;
+
+static u64 sample_type;
struct ip_event {
struct perf_event_header header;
@@ -59,11 +67,6 @@ struct ip_event {
unsigned char __more_data[];
};
-struct ip_callchain {
- u64 nr;
- u64 ips[0];
-};
-
struct mmap_event {
struct perf_event_header header;
u32 pid, tid;
@@ -97,6 +100,13 @@ struct lost_event {
u64 lost;
};
+struct read_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ u64 value;
+ u64 format[3];
+};
+
typedef union event_union {
struct perf_event_header header;
struct ip_event ip;
@@ -105,6 +115,7 @@ typedef union event_union {
struct fork_event fork;
struct period_event period;
struct lost_event lost;
+ struct read_event read;
} event_t;
static LIST_HEAD(dsos);
@@ -229,7 +240,7 @@ static u64 vdso__map_ip(struct map *map, u64 ip)
static inline int is_anon_memory(const char *filename)
{
- return strcmp(filename, "//anon") == 0;
+ return strcmp(filename, "//anon") == 0;
}
static struct map *map__new(struct mmap_event *event)
@@ -400,9 +411,27 @@ static void thread__insert_map(struct thread *self, struct map *map)
list_for_each_entry_safe(pos, tmp, &self->maps, node) {
if (map__overlap(pos, map)) {
- list_del_init(&pos->node);
- /* XXX leaks dsos */
- free(pos);
+ if (verbose >= 2) {
+ printf("overlapping maps:\n");
+ map__fprintf(map, stdout);
+ map__fprintf(pos, stdout);
+ }
+
+ if (map->start <= pos->start && map->end > pos->start)
+ pos->start = map->end;
+
+ if (map->end >= pos->end && map->start < pos->end)
+ pos->end = map->start;
+
+ if (verbose >= 2) {
+ printf("after collision:\n");
+ map__fprintf(pos, stdout);
+ }
+
+ if (pos->start >= pos->end) {
+ list_del_init(&pos->node);
+ free(pos);
+ }
}
}
@@ -464,17 +493,19 @@ static size_t threads__fprintf(FILE *fp)
static struct rb_root hist;
struct hist_entry {
- struct rb_node rb_node;
-
- struct thread *thread;
- struct map *map;
- struct dso *dso;
- struct symbol *sym;
- struct symbol *parent;
- u64 ip;
- char level;
-
- u64 count;
+ struct rb_node rb_node;
+
+ struct thread *thread;
+ struct map *map;
+ struct dso *dso;
+ struct symbol *sym;
+ struct symbol *parent;
+ u64 ip;
+ char level;
+ struct callchain_node callchain;
+ struct rb_root sorted_chain;
+
+ u64 count;
};
/*
@@ -745,6 +776,48 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
}
static size_t
+callchain__fprintf(FILE *fp, struct callchain_node *self, u64 total_samples)
+{
+ struct callchain_list *chain;
+ size_t ret = 0;
+
+ if (!self)
+ return 0;
+
+ ret += callchain__fprintf(fp, self->parent, total_samples);
+
+
+ list_for_each_entry(chain, &self->val, list)
+ ret += fprintf(fp, " %p\n", (void *)chain->ip);
+
+ return ret;
+}
+
+static size_t
+hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
+ u64 total_samples)
+{
+ struct rb_node *rb_node;
+ struct callchain_node *chain;
+ size_t ret = 0;
+
+ rb_node = rb_first(&self->sorted_chain);
+ while (rb_node) {
+ double percent;
+
+ chain = rb_entry(rb_node, struct callchain_node, rb_node);
+ percent = chain->hit * 100.0 / total_samples;
+ ret += fprintf(fp, " %6.2f%%\n", percent);
+ ret += callchain__fprintf(fp, chain, total_samples);
+ ret += fprintf(fp, "\n");
+ rb_node = rb_next(rb_node);
+ }
+
+ return ret;
+}
+
+
+static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
{
struct sort_entry *se;
@@ -784,6 +857,9 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
ret += fprintf(fp, "\n");
+ if (callchain)
+ hist_entry_callchain__fprintf(fp, self, total_samples);
+
return ret;
}
@@ -797,7 +873,7 @@ resolve_symbol(struct thread *thread, struct map **mapp,
{
struct dso *dso = dsop ? *dsop : NULL;
struct map *map = mapp ? *mapp : NULL;
- uint64_t ip = *ipp;
+ u64 ip = *ipp;
if (!thread)
return NULL;
@@ -814,7 +890,6 @@ resolve_symbol(struct thread *thread, struct map **mapp,
*mapp = map;
got_map:
ip = map->map_ip(map, ip);
- *ipp = ip;
dso = map->dso;
} else {
@@ -828,6 +903,8 @@ got_map:
dso = kernel_dso;
}
dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
+ dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
+ *ipp = ip;
if (dsop)
*dsop = dso;
@@ -867,6 +944,7 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
.level = level,
.count = count,
.parent = NULL,
+ .sorted_chain = RB_ROOT
};
int cmp;
@@ -909,6 +987,8 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
if (!cmp) {
he->count += count;
+ if (callchain)
+ append_chain(&he->callchain, chain);
return 0;
}
@@ -922,6 +1002,10 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
if (!he)
return -ENOMEM;
*he = entry;
+ if (callchain) {
+ callchain_init(&he->callchain);
+ append_chain(&he->callchain, chain);
+ }
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, &hist);
@@ -998,6 +1082,9 @@ static void output__insert_entry(struct hist_entry *he)
struct rb_node *parent = NULL;
struct hist_entry *iter;
+ if (callchain)
+ sort_chain_to_rbtree(&he->sorted_chain, &he->callchain);
+
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
@@ -1115,7 +1202,7 @@ static int validate_chain(struct ip_callchain *chain, event_t *event)
}
static int
-process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
+process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
char level;
int show = 0;
@@ -1127,12 +1214,12 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
void *more_data = event->ip.__more_data;
struct ip_callchain *chain = NULL;
- if (event->header.type & PERF_SAMPLE_PERIOD) {
+ if (sample_type & PERF_SAMPLE_PERIOD) {
period = *(u64 *)more_data;
more_data += sizeof(u64);
}
- dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p period: %Ld\n",
+ dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.misc,
@@ -1140,7 +1227,7 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
(void *)(long)ip,
(long long)period);
- if (event->header.type & PERF_SAMPLE_CALLCHAIN) {
+ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int i;
chain = (void *)more_data;
@@ -1166,6 +1253,9 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
return -1;
}
+ if (comm_list && !strlist__has_entry(comm_list, thread->comm))
+ return 0;
+
if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
show = SHOW_KERNEL;
level = 'k';
@@ -1188,6 +1278,12 @@ process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
if (show & show_mask) {
struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
+ if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name))
+ return 0;
+
+ if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
+ return 0;
+
if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
eprintf("problem incrementing symbol count, skipping event\n");
return -1;
@@ -1328,14 +1424,27 @@ static void trace_event(event_t *event)
}
static int
+process_read_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ dprintf("%p [%p]: PERF_EVENT_READ: %d %d %Lu\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->read.pid,
+ event->read.tid,
+ event->read.value);
+
+ return 0;
+}
+
+static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
trace_event(event);
- if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
- return process_overflow_event(event, offset, head);
-
switch (event->header.type) {
+ case PERF_EVENT_SAMPLE:
+ return process_sample_event(event, offset, head);
+
case PERF_EVENT_MMAP:
return process_mmap_event(event, offset, head);
@@ -1351,6 +1460,9 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
case PERF_EVENT_LOST:
return process_lost_event(event, offset, head);
+ case PERF_EVENT_READ:
+ return process_read_event(event, offset, head);
+
/*
* We don't process them right now, but they are fine:
*/
@@ -1366,13 +1478,30 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
return 0;
}
-static struct perf_file_header file_header;
+static struct perf_header *header;
+
+static u64 perf_header__sample_type(void)
+{
+ u64 sample_type = 0;
+ int i;
+
+ for (i = 0; i < header->attrs; i++) {
+ struct perf_header_attr *attr = header->attr[i];
+
+ if (!sample_type)
+ sample_type = attr->attr.sample_type;
+ else if (sample_type != attr->attr.sample_type)
+ die("non matching sample_type");
+ }
+
+ return sample_type;
+}
static int __cmd_report(void)
{
int ret, rc = EXIT_FAILURE;
unsigned long offset = 0;
- unsigned long head = sizeof(file_header);
+ unsigned long head, shift;
struct stat stat;
event_t *event;
uint32_t size;
@@ -1400,13 +1529,12 @@ static int __cmd_report(void)
exit(0);
}
- if (read(input, &file_header, sizeof(file_header)) == -1) {
- perror("failed to read file headers");
- exit(-1);
- }
+ header = perf_header__read(input);
+ head = header->data_offset;
- if (sort__has_parent &&
- !(file_header.sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ sample_type = perf_header__sample_type();
+
+ if (sort__has_parent && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
fprintf(stderr, "selected --sort parent, but no callchain data\n");
exit(-1);
}
@@ -1426,6 +1554,11 @@ static int __cmd_report(void)
cwd = NULL;
cwdlen = 0;
}
+
+ shift = page_size * (head / page_size);
+ offset += shift;
+ head -= shift;
+
remap:
buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
MAP_SHARED, input, offset);
@@ -1442,9 +1575,10 @@ more:
size = 8;
if (head + event->header.size >= page_size * mmap_window) {
- unsigned long shift = page_size * (head / page_size);
int ret;
+ shift = page_size * (head / page_size);
+
ret = munmap(buf, page_size * mmap_window);
assert(ret == 0);
@@ -1482,7 +1616,7 @@ more:
head += size;
- if (offset + head >= sizeof(file_header) + file_header.data_size)
+ if (offset + head >= header->data_offset + header->data_size)
goto done;
if (offset + head < stat.st_size)
@@ -1536,6 +1670,13 @@ static const struct option options[] = {
"regex filter to identify parent, see: '--sort parent'"),
OPT_BOOLEAN('x', "exclude-other", &exclude_other,
"Only display entries with parent-match"),
+ OPT_BOOLEAN('c', "callchain", &callchain, "Display callchains"),
+ OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
+ "only consider symbols in these dsos"),
+ OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
+ "only consider symbols in these comms"),
+ OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
+ "only consider these symbols"),
OPT_END()
};
@@ -1554,6 +1695,19 @@ static void setup_sorting(void)
free(str);
}
+static void setup_list(struct strlist **list, const char *list_str,
+ const char *list_name)
+{
+ if (list_str) {
+ *list = strlist__new(true, list_str);
+ if (!*list) {
+ fprintf(stderr, "problems parsing %s list\n",
+ list_name);
+ exit(129);
+ }
+ }
+}
+
int cmd_report(int argc, const char **argv, const char *prefix)
{
symbol__init();
@@ -1575,6 +1729,10 @@ int cmd_report(int argc, const char **argv, const char *prefix)
if (argc)
usage_with_options(report_usage, options);
+ setup_list(&dso_list, dso_list_str, "dso");
+ setup_list(&comm_list, comm_list_str, "comm");
+ setup_list(&sym_list, sym_list_str, "symbol");
+
setup_pager();
return __cmd_report();
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 6d3eeac1ea25..2e03524a1de0 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -32,6 +32,7 @@
* Wu Fengguang <[email protected]>
* Mike Galbraith <[email protected]>
* Paul Mackerras <[email protected]>
+ * Jaswinder Singh Rajput <[email protected]>
*
* Released under the GPL v2. (and only v2, not any later version)
*/
@@ -45,7 +46,7 @@
#include <sys/prctl.h>
#include <math.h>
-static struct perf_counter_attr default_attrs[MAX_COUNTERS] = {
+static struct perf_counter_attr default_attrs[] = {
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES},
@@ -59,42 +60,28 @@ static struct perf_counter_attr default_attrs[MAX_COUNTERS] = {
};
+#define MAX_RUN 100
+
static int system_wide = 0;
-static int inherit = 1;
static int verbose = 0;
-
-static int fd[MAX_NR_CPUS][MAX_COUNTERS];
-
-static int target_pid = -1;
static int nr_cpus = 0;
-static unsigned int page_size;
+static int run_idx = 0;
+static int run_count = 1;
+static int inherit = 1;
static int scale = 1;
+static int target_pid = -1;
+static int null_run = 0;
-static const unsigned int default_count[] = {
- 1000000,
- 1000000,
- 10000,
- 10000,
- 1000000,
- 10000,
-};
-
-#define MAX_RUN 100
-
-static int run_count = 1;
-static int run_idx = 0;
-
-static u64 event_res[MAX_RUN][MAX_COUNTERS][3];
-static u64 event_scaled[MAX_RUN][MAX_COUNTERS];
-
-//static u64 event_hist[MAX_RUN][MAX_COUNTERS][3];
-
+static int fd[MAX_NR_CPUS][MAX_COUNTERS];
static u64 runtime_nsecs[MAX_RUN];
static u64 walltime_nsecs[MAX_RUN];
static u64 runtime_cycles[MAX_RUN];
+static u64 event_res[MAX_RUN][MAX_COUNTERS][3];
+static u64 event_scaled[MAX_RUN][MAX_COUNTERS];
+
static u64 event_res_avg[MAX_COUNTERS][3];
static u64 event_res_noise[MAX_COUNTERS][3];
@@ -109,7 +96,10 @@ static u64 walltime_nsecs_noise;
static u64 runtime_cycles_avg;
static u64 runtime_cycles_noise;
-static void create_perf_stat_counter(int counter)
+#define ERR_PERF_OPEN \
+"Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n"
+
+static void create_perf_stat_counter(int counter, int pid)
{
struct perf_counter_attr *attr = attrs + counter;
@@ -119,20 +109,21 @@ static void create_perf_stat_counter(int counter)
if (system_wide) {
int cpu;
- for (cpu = 0; cpu < nr_cpus; cpu ++) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0);
- if (fd[cpu][counter] < 0 && verbose) {
- printf("Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n", counter, fd[cpu][counter], strerror(errno));
- }
+ if (fd[cpu][counter] < 0 && verbose)
+ fprintf(stderr, ERR_PERF_OPEN, counter,
+ fd[cpu][counter], strerror(errno));
}
} else {
- attr->inherit = inherit;
- attr->disabled = 1;
-
- fd[0][counter] = sys_perf_counter_open(attr, 0, -1, -1, 0);
- if (fd[0][counter] < 0 && verbose) {
- printf("Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n", counter, fd[0][counter], strerror(errno));
- }
+ attr->inherit = inherit;
+ attr->disabled = 1;
+ attr->enable_on_exec = 1;
+
+ fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0);
+ if (fd[0][counter] < 0 && verbose)
+ fprintf(stderr, ERR_PERF_OPEN, counter,
+ fd[0][counter], strerror(errno));
}
}
@@ -168,7 +159,7 @@ static void read_counter(int counter)
count[0] = count[1] = count[2] = 0;
nv = scale ? 3 : 1;
- for (cpu = 0; cpu < nr_cpus; cpu ++) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
if (fd[cpu][counter] < 0)
continue;
@@ -215,32 +206,67 @@ static int run_perf_stat(int argc, const char **argv)
int status = 0;
int counter;
int pid;
+ int child_ready_pipe[2], go_pipe[2];
+ char buf;
if (!system_wide)
nr_cpus = 1;
- for (counter = 0; counter < nr_counters; counter++)
- create_perf_stat_counter(counter);
-
- /*
- * Enable counters and exec the command:
- */
- t0 = rdclock();
- prctl(PR_TASK_PERF_COUNTERS_ENABLE);
+ if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) {
+ perror("failed to create pipes");
+ exit(1);
+ }
if ((pid = fork()) < 0)
perror("failed to fork");
if (!pid) {
- if (execvp(argv[0], (char **)argv)) {
- perror(argv[0]);
- exit(-1);
- }
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+ /*
+ * Do a dummy execvp to get the PLT entry resolved,
+ * so we avoid the resolver overhead on the real
+ * execvp call.
+ */
+ execvp("", (char **)argv);
+
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
+
+ /*
+ * Wait until the parent tells us to go.
+ */
+ read(go_pipe[0], &buf, 1);
+
+ execvp(argv[0], (char **)argv);
+
+ perror(argv[0]);
+ exit(-1);
}
+ /*
+ * Wait for the child to be ready to exec.
+ */
+ close(child_ready_pipe[1]);
+ close(go_pipe[0]);
+ read(child_ready_pipe[0], &buf, 1);
+ close(child_ready_pipe[0]);
+
+ for (counter = 0; counter < nr_counters; counter++)
+ create_perf_stat_counter(counter, pid);
+
+ /*
+ * Enable counters and exec the command:
+ */
+ t0 = rdclock();
+
+ close(go_pipe[1]);
wait(&status);
- prctl(PR_TASK_PERF_COUNTERS_DISABLE);
t1 = rdclock();
walltime_nsecs[run_idx] = t1 - t0;
@@ -262,7 +288,7 @@ static void nsec_printout(int counter, u64 *count, u64 *noise)
{
double msecs = (double)count[0] / 1000000;
- fprintf(stderr, " %14.6f %-20s", msecs, event_name(counter));
+ fprintf(stderr, " %14.6f %-24s", msecs, event_name(counter));
if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK) {
@@ -276,7 +302,7 @@ static void nsec_printout(int counter, u64 *count, u64 *noise)
static void abs_printout(int counter, u64 *count, u64 *noise)
{
- fprintf(stderr, " %14Ld %-20s", count[0], event_name(counter));
+ fprintf(stderr, " %14Ld %-24s", count[0], event_name(counter));
if (runtime_cycles_avg &&
attrs[counter].type == PERF_TYPE_HARDWARE &&
@@ -306,7 +332,7 @@ static void print_counter(int counter)
scaled = event_scaled_avg[counter];
if (scaled == -1) {
- fprintf(stderr, " %14s %-20s\n",
+ fprintf(stderr, " %14s %-24s\n",
"<not counted>", event_name(counter));
return;
}
@@ -364,8 +390,11 @@ static void calc_avg(void)
event_res_avg[j]+1, event_res[i][j]+1);
update_avg("counter/2", j,
event_res_avg[j]+2, event_res[i][j]+2);
- update_avg("scaled", j,
- event_scaled_avg + j, event_scaled[i]+j);
+ if (event_scaled[i][j] != -1)
+ update_avg("scaled", j,
+ event_scaled_avg + j, event_scaled[i]+j);
+ else
+ event_scaled_avg[j] = -1;
}
}
runtime_nsecs_avg /= run_count;
@@ -429,11 +458,14 @@ static void print_stat(int argc, const char **argv)
for (counter = 0; counter < nr_counters; counter++)
print_counter(counter);
-
fprintf(stderr, "\n");
- fprintf(stderr, " %14.9f seconds time elapsed.\n",
+ fprintf(stderr, " %14.9f seconds time elapsed",
(double)walltime_nsecs_avg/1e9);
- fprintf(stderr, "\n");
+ if (run_count > 1) {
+ fprintf(stderr, " ( +- %7.3f%% )",
+ 100.0*(double)walltime_nsecs_noise/(double)walltime_nsecs_avg);
+ }
+ fprintf(stderr, "\n\n");
}
static volatile int signr = -1;
@@ -466,13 +498,15 @@ static const struct option options[] = {
OPT_INTEGER('p', "pid", &target_pid,
"stat events on existing pid"),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
- "system-wide collection from all CPUs"),
+ "system-wide collection from all CPUs"),
OPT_BOOLEAN('S', "scale", &scale,
- "scale/normalize counters"),
+ "scale/normalize counters"),
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &run_count,
"repeat command and print average + stddev (max: 100)"),
+ OPT_BOOLEAN('n', "null", &null_run,
+ "null run - don't start any counters"),
OPT_END()
};
@@ -480,18 +514,17 @@ int cmd_stat(int argc, const char **argv, const char *prefix)
{
int status;
- page_size = sysconf(_SC_PAGE_SIZE);
-
- memcpy(attrs, default_attrs, sizeof(attrs));
-
argc = parse_options(argc, argv, options, stat_usage, 0);
if (!argc)
usage_with_options(stat_usage, options);
if (run_count <= 0 || run_count > MAX_RUN)
usage_with_options(stat_usage, options);
- if (!nr_counters)
- nr_counters = 8;
+ /* Set attrs and nr_counters if no event is selected and !null_run */
+ if (!null_run && !nr_counters) {
+ memcpy(attrs, default_attrs, sizeof(default_attrs));
+ nr_counters = ARRAY_SIZE(default_attrs);
+ }
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
assert(nr_cpus <= MAX_NR_CPUS);
@@ -511,7 +544,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix)
status = 0;
for (run_idx = 0; run_idx < run_count; run_idx++) {
if (run_count != 1 && verbose)
- fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx+1);
+ fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
status = run_perf_stat(argc, argv);
}
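
The pipe handshake added above replaces the old PR_TASK_PERF_COUNTERS_ENABLE/DISABLE prctl pair: the child signals readiness by closing one pipe, then blocks on a second pipe until the parent has opened the counters (with enable_on_exec set) and closes it. A minimal, self-contained sketch of that pattern follows; it is illustration only, not part of the patch, and execs an arbitrary command:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/wait.h>

    int main(void)
    {
            int child_ready[2], go[2];
            char buf;

            if (pipe(child_ready) < 0 || pipe(go) < 0) {
                    perror("pipe");
                    return 1;
            }

            if (fork() == 0) {
                    close(child_ready[0]);
                    close(go[1]);
                    close(child_ready[1]);          /* tell the parent we are ready */
                    read(go[0], &buf, 1);           /* block until the parent says go */
                    execlp("true", "true", (char *)NULL);
                    _exit(127);
            }

            close(child_ready[1]);
            close(go[0]);
            read(child_ready[0], &buf, 1);          /* returns 0 once the child closed its end */
            /* ... open the counters here, with attr->enable_on_exec = 1 ... */
            close(go[1]);                           /* release the child into exec */
            wait(NULL);
            return 0;
    }
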
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 5352b5e352ed..cf0d21f1ae10 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -392,11 +392,11 @@ static void record_ip(u64 ip, int counter)
samples--;
}
-static void process_event(u64 ip, int counter)
+static void process_event(u64 ip, int counter, int user)
{
samples++;
- if (ip < min_ip || ip > max_ip) {
+ if (user) {
userspace_samples++;
return;
}
@@ -509,9 +509,10 @@ static void mmap_read_counter(struct mmap_data *md)
old += size;
- if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
- if (event->header.type & PERF_SAMPLE_IP)
- process_event(event->ip.ip, md->counter);
+ if (event->header.type == PERF_EVENT_SAMPLE) {
+ int user =
+ (event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER;
+ process_event(event->ip.ip, md->counter, user);
}
}
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index ceb68aa51f7f..ce394192c85a 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -25,7 +25,7 @@
#include <sys/syscall.h>
#include "../../include/linux/perf_counter.h"
-#include "types.h"
+#include "util/types.h"
/*
* prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
@@ -72,10 +72,9 @@ sys_perf_counter_open(struct perf_counter_attr *attr,
#define MAX_COUNTERS 256
#define MAX_NR_CPUS 256
-struct perf_file_header {
- u64 version;
- u64 sample_type;
- u64 data_size;
+struct ip_callchain {
+ u64 nr;
+ u64 ips[0];
};
#endif
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
new file mode 100644
index 000000000000..ad3c28578961
--- /dev/null
+++ b/tools/perf/util/callchain.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2009, Frederic Weisbecker <[email protected]>
+ *
+ * Handle the callchains from the stream in an ad-hoc radix tree and then
+ * sort them in an rbtree.
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+
+#include "callchain.h"
+
+
+static void rb_insert_callchain(struct rb_root *root, struct callchain_node *chain)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct callchain_node *rnode;
+
+ while (*p) {
+ parent = *p;
+ rnode = rb_entry(parent, struct callchain_node, rb_node);
+
+ if (rnode->hit < chain->hit)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&chain->rb_node, parent, p);
+ rb_insert_color(&chain->rb_node, root);
+}
+
+/*
+ * Once we have all the callchains from the stream, we can
+ * sort them by hit count
+ */
+void sort_chain_to_rbtree(struct rb_root *rb_root, struct callchain_node *node)
+{
+ struct callchain_node *child;
+
+ list_for_each_entry(child, &node->children, brothers)
+ sort_chain_to_rbtree(rb_root, child);
+
+ if (node->hit)
+ rb_insert_callchain(rb_root, node);
+}
+
+static struct callchain_node *create_child(struct callchain_node *parent)
+{
+ struct callchain_node *new;
+
+ new = malloc(sizeof(*new));
+ if (!new) {
+ perror("not enough memory to create child for code path tree");
+ return NULL;
+ }
+ new->parent = parent;
+ INIT_LIST_HEAD(&new->children);
+ INIT_LIST_HEAD(&new->val);
+ list_add_tail(&new->brothers, &parent->children);
+
+ return new;
+}
+
+static void
+fill_node(struct callchain_node *node, struct ip_callchain *chain, int start)
+{
+ int i;
+
+ for (i = start; i < chain->nr; i++) {
+ struct callchain_list *call;
+
+ call = malloc(sizeof(*call));
+ if (!call) {
+ perror("not enough memory for the code path tree");
+ return;
+ }
+ call->ip = chain->ips[i];
+ list_add_tail(&call->list, &node->val);
+ }
+ node->val_nr = i - start;
+}
+
+static void add_child(struct callchain_node *parent, struct ip_callchain *chain)
+{
+ struct callchain_node *new;
+
+ new = create_child(parent);
+ fill_node(new, chain, parent->val_nr);
+
+ new->hit = 1;
+}
+
+static void
+split_add_child(struct callchain_node *parent, struct ip_callchain *chain,
+ struct callchain_list *to_split, int idx)
+{
+ struct callchain_node *new;
+
+ /* split */
+ new = create_child(parent);
+ list_move_tail(&to_split->list, &new->val);
+ new->hit = parent->hit;
+ parent->hit = 0;
+ parent->val_nr = idx;
+
+ /* create the new one */
+ add_child(parent, chain);
+}
+
+static int
+__append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ int start);
+
+static int
+__append_chain_children(struct callchain_node *root, struct ip_callchain *chain)
+{
+ struct callchain_node *rnode;
+
+ /* lookup in the children */
+ list_for_each_entry(rnode, &root->children, brothers) {
+ int ret = __append_chain(rnode, chain, root->val_nr);
+ if (!ret)
+ return 0;
+ }
+ return -1;
+}
+
+static int
+__append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ int start)
+{
+ struct callchain_list *cnode;
+ int i = start;
+ bool found = false;
+
+ /* lookup in the current node */
+ list_for_each_entry(cnode, &root->val, list) {
+ if (cnode->ip != chain->ips[i++])
+ break;
+ if (!found)
+ found = true;
+ if (i == chain->nr)
+ break;
+ }
+
+ /* no match: rely on the parent */
+ if (!found)
+ return -1;
+
+ /* we match only a part of the node. Split it and add the new chain */
+ if (i < root->val_nr) {
+ split_add_child(root, chain, cnode, i);
+ return 0;
+ }
+
+ /* we match 100% of the path, increment the hit */
+ if (i == root->val_nr) {
+ root->hit++;
+ return 0;
+ }
+
+ return __append_chain_children(root, chain);
+}
+
+void append_chain(struct callchain_node *root, struct ip_callchain *chain)
+{
+ if (__append_chain_children(root, chain) == -1)
+ add_child(root, chain);
+}
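
A rough usage sketch for this API (illustration only, not part of the patch), mirroring what builtin-report does per hist entry: feed every sample's chain into the prefix tree, then flatten the tree into an rbtree ordered by hit count:

    #include "callchain.h"

    /* chains[] would point at PERF_SAMPLE_CALLCHAIN data from the stream */
    static void collapse_chains(struct ip_callchain *chains[], int nr_samples)
    {
            struct callchain_node root = { .parent = NULL };  /* hit/val_nr start at 0 */
            struct rb_root sorted = RB_ROOT;
            int i;

            callchain_init(&root);
            for (i = 0; i < nr_samples; i++)
                    append_chain(&root, chains[i]);   /* build/extend the prefix tree */

            sort_chain_to_rbtree(&sorted, &root);
            /* walk with rb_first()/rb_next(): highest hit count comes first */
    }
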
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
new file mode 100644
index 000000000000..fa1cd2f71fd3
--- /dev/null
+++ b/tools/perf/util/callchain.h
@@ -0,0 +1,33 @@
+#ifndef __PERF_CALLCHAIN_H
+#define __PERF_CALLCHAIN_H
+
+#include "../perf.h"
+#include "list.h"
+#include "rbtree.h"
+
+
+struct callchain_node {
+ struct callchain_node *parent;
+ struct list_head brothers;
+ struct list_head children;
+ struct list_head val;
+ struct rb_node rb_node;
+ int val_nr;
+ int hit;
+};
+
+struct callchain_list {
+ unsigned long ip;
+ struct list_head list;
+};
+
+static inline void callchain_init(struct callchain_node *node)
+{
+ INIT_LIST_HEAD(&node->brothers);
+ INIT_LIST_HEAD(&node->children);
+ INIT_LIST_HEAD(&node->val);
+}
+
+void append_chain(struct callchain_node *root, struct ip_callchain *chain);
+void sort_chain_to_rbtree(struct rb_root *rb_root, struct callchain_node *node);
+#endif
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
new file mode 100644
index 000000000000..450384b3bbe5
--- /dev/null
+++ b/tools/perf/util/header.c
@@ -0,0 +1,242 @@
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "util.h"
+#include "header.h"
+
+/*
+ *
+ */
+
+struct perf_header_attr *perf_header_attr__new(struct perf_counter_attr *attr)
+{
+ struct perf_header_attr *self = malloc(sizeof(*self));
+
+ if (!self)
+ die("nomem");
+
+ self->attr = *attr;
+ self->ids = 0;
+ self->size = 1;
+ self->id = malloc(sizeof(u64));
+
+ if (!self->id)
+ die("nomem");
+
+ return self;
+}
+
+void perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
+{
+ int pos = self->ids;
+
+ self->ids++;
+ if (self->ids > self->size) {
+ self->size *= 2;
+ self->id = realloc(self->id, self->size * sizeof(u64));
+ if (!self->id)
+ die("nomem");
+ }
+ self->id[pos] = id;
+}
+
+/*
+ *
+ */
+
+struct perf_header *perf_header__new(void)
+{
+ struct perf_header *self = malloc(sizeof(*self));
+
+ if (!self)
+ die("nomem");
+
+ self->frozen = 0;
+
+ self->attrs = 0;
+ self->size = 1;
+ self->attr = malloc(sizeof(void *));
+
+ if (!self->attr)
+ die("nomem");
+
+ self->data_offset = 0;
+ self->data_size = 0;
+
+ return self;
+}
+
+void perf_header__add_attr(struct perf_header *self,
+ struct perf_header_attr *attr)
+{
+ int pos = self->attrs;
+
+ if (self->frozen)
+ die("frozen");
+
+ self->attrs++;
+ if (self->attrs > self->size) {
+ self->size *= 2;
+ self->attr = realloc(self->attr, self->size * sizeof(void *));
+ if (!self->attr)
+ die("nomem");
+ }
+ self->attr[pos] = attr;
+}
+
+static const char *__perf_magic = "PERFFILE";
+
+#define PERF_MAGIC (*(u64 *)__perf_magic)
+
+struct perf_file_section {
+ u64 offset;
+ u64 size;
+};
+
+struct perf_file_attr {
+ struct perf_counter_attr attr;
+ struct perf_file_section ids;
+};
+
+struct perf_file_header {
+ u64 magic;
+ u64 size;
+ u64 attr_size;
+ struct perf_file_section attrs;
+ struct perf_file_section data;
+};
+
+static void do_write(int fd, void *buf, size_t size)
+{
+ while (size) {
+ int ret = write(fd, buf, size);
+
+ if (ret < 0)
+ die("failed to write");
+
+ size -= ret;
+ buf += ret;
+ }
+}
+
+void perf_header__write(struct perf_header *self, int fd)
+{
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ struct perf_header_attr *attr;
+ int i;
+
+ lseek(fd, sizeof(f_header), SEEK_SET);
+
+
+ for (i = 0; i < self->attrs; i++) {
+ attr = self->attr[i];
+
+ attr->id_offset = lseek(fd, 0, SEEK_CUR);
+ do_write(fd, attr->id, attr->ids * sizeof(u64));
+ }
+
+
+ self->attr_offset = lseek(fd, 0, SEEK_CUR);
+
+ for (i = 0; i < self->attrs; i++) {
+ attr = self->attr[i];
+
+ f_attr = (struct perf_file_attr){
+ .attr = attr->attr,
+ .ids = {
+ .offset = attr->id_offset,
+ .size = attr->ids * sizeof(u64),
+ }
+ };
+ do_write(fd, &f_attr, sizeof(f_attr));
+ }
+
+
+ self->data_offset = lseek(fd, 0, SEEK_CUR);
+
+ f_header = (struct perf_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ .attr_size = sizeof(f_attr),
+ .attrs = {
+ .offset = self->attr_offset,
+ .size = self->attrs * sizeof(f_attr),
+ },
+ .data = {
+ .offset = self->data_offset,
+ .size = self->data_size,
+ },
+ };
+
+ lseek(fd, 0, SEEK_SET);
+ do_write(fd, &f_header, sizeof(f_header));
+ lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+
+ self->frozen = 1;
+}
+
+static void do_read(int fd, void *buf, size_t size)
+{
+ while (size) {
+ int ret = read(fd, buf, size);
+
+ if (ret < 0)
+ die("failed to read");
+
+ size -= ret;
+ buf += ret;
+ }
+}
+
+struct perf_header *perf_header__read(int fd)
+{
+ struct perf_header *self = perf_header__new();
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ u64 f_id;
+
+ int nr_attrs, nr_ids, i, j;
+
+ lseek(fd, 0, SEEK_SET);
+ do_read(fd, &f_header, sizeof(f_header));
+
+ if (f_header.magic != PERF_MAGIC ||
+ f_header.size != sizeof(f_header) ||
+ f_header.attr_size != sizeof(f_attr))
+ die("incompatible file format");
+
+ nr_attrs = f_header.attrs.size / sizeof(f_attr);
+ lseek(fd, f_header.attrs.offset, SEEK_SET);
+
+ for (i = 0; i < nr_attrs; i++) {
+ struct perf_header_attr *attr;
+ off_t tmp = lseek(fd, 0, SEEK_CUR);
+
+ do_read(fd, &f_attr, sizeof(f_attr));
+
+ attr = perf_header_attr__new(&f_attr.attr);
+
+ nr_ids = f_attr.ids.size / sizeof(u64);
+ lseek(fd, f_attr.ids.offset, SEEK_SET);
+
+ for (j = 0; j < nr_ids; j++) {
+ do_read(fd, &f_id, sizeof(f_id));
+
+ perf_header_attr__add_id(attr, f_id);
+ }
+ perf_header__add_attr(self, attr);
+ lseek(fd, tmp, SEEK_SET);
+ }
+
+ self->data_offset = f_header.data.offset;
+ self->data_size = f_header.data.size;
+
+ lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+
+ self->frozen = 1;
+
+ return self;
+}
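
For reference, a sketch (illustration only) of the file layout that perf_header__write() produces and perf_header__read() consumes, plus the one call a reader needs to reach the sample data:

    /*
     * Layout written by perf_header__write():
     *
     *   0                     struct perf_file_header  magic "PERFFILE", sizes,
     *                                                  sections for attrs and data
     *   sizeof(file header)   per-attr id arrays       u64 ids, located via each attr
     *   attrs.offset          struct perf_file_attr[]  attr + section of its id array
     *   data.offset           event records            data.size bytes of samples
     */
    #include <unistd.h>
    #include "header.h"

    /* illustration only: position a reader at the first event record */
    static off_t seek_to_data(int fd)
    {
            struct perf_header *h = perf_header__read(fd);

            return lseek(fd, h->data_offset, SEEK_SET);
    }
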
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
new file mode 100644
index 000000000000..b5ef53ad4c7a
--- /dev/null
+++ b/tools/perf/util/header.h
@@ -0,0 +1,37 @@
+#ifndef _PERF_HEADER_H
+#define _PERF_HEADER_H
+
+#include "../../../include/linux/perf_counter.h"
+#include <sys/types.h>
+#include "types.h"
+
+struct perf_header_attr {
+ struct perf_counter_attr attr;
+ int ids, size;
+ u64 *id;
+ off_t id_offset;
+};
+
+struct perf_header {
+ int frozen;
+ int attrs, size;
+ struct perf_header_attr **attr;
+ off_t attr_offset;
+ u64 data_offset;
+ u64 data_size;
+};
+
+struct perf_header *perf_header__read(int fd);
+void perf_header__write(struct perf_header *self, int fd);
+
+void perf_header__add_attr(struct perf_header *self,
+ struct perf_header_attr *attr);
+
+struct perf_header_attr *
+perf_header_attr__new(struct perf_counter_attr *attr);
+void perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
+
+
+struct perf_header *perf_header__new(void);
+
+#endif /* _PERF_HEADER_H */
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c
index 6653f7dd1d78..17a00e0df2c4 100644
--- a/tools/perf/util/help.c
+++ b/tools/perf/util/help.c
@@ -126,21 +126,6 @@ static int is_executable(const char *name)
!S_ISREG(st.st_mode))
return 0;
-#ifdef __MINGW32__
- /* cannot trust the executable bit, peek into the file instead */
- char buf[3] = { 0 };
- int n;
- int fd = open(name, O_RDONLY);
- st.st_mode &= ~S_IXUSR;
- if (fd >= 0) {
- n = read(fd, buf, 2);
- if (n == 2)
- /* DOS executables start with "MZ" */
- if (!strcmp(buf, "#!") || !strcmp(buf, "MZ"))
- st.st_mode |= S_IXUSR;
- close(fd);
- }
-#endif
return st.st_mode & S_IXUSR;
}
diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c
index a28bccae5458..1915de20dcac 100644
--- a/tools/perf/util/pager.c
+++ b/tools/perf/util/pager.c
@@ -9,7 +9,6 @@
static int spawned_pager;
-#ifndef __MINGW32__
static void pager_preexec(void)
{
/*
@@ -24,7 +23,6 @@ static void pager_preexec(void)
setenv("LESS", "FRSX", 0);
}
-#endif
static const char *pager_argv[] = { "sh", "-c", NULL, NULL };
static struct child_process pager_process;
@@ -70,9 +68,8 @@ void setup_pager(void)
pager_argv[2] = pager;
pager_process.argv = pager_argv;
pager_process.in = -1;
-#ifndef __MINGW32__
pager_process.preexec_cb = pager_preexec;
-#endif
+
if (start_command(&pager_process))
return;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 35d04da38d6a..4d042f104cdc 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -16,32 +16,28 @@ struct event_symbol {
u8 type;
u64 config;
char *symbol;
+ char *alias;
};
-#define C(x, y) .type = PERF_TYPE_##x, .config = PERF_COUNT_##y
-#define CR(x, y) .type = PERF_TYPE_##x, .config = y
+#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
+#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
static struct event_symbol event_symbols[] = {
- { C(HARDWARE, HW_CPU_CYCLES), "cpu-cycles", },
- { C(HARDWARE, HW_CPU_CYCLES), "cycles", },
- { C(HARDWARE, HW_INSTRUCTIONS), "instructions", },
- { C(HARDWARE, HW_CACHE_REFERENCES), "cache-references", },
- { C(HARDWARE, HW_CACHE_MISSES), "cache-misses", },
- { C(HARDWARE, HW_BRANCH_INSTRUCTIONS),"branch-instructions", },
- { C(HARDWARE, HW_BRANCH_INSTRUCTIONS),"branches", },
- { C(HARDWARE, HW_BRANCH_MISSES), "branch-misses", },
- { C(HARDWARE, HW_BUS_CYCLES), "bus-cycles", },
-
- { C(SOFTWARE, SW_CPU_CLOCK), "cpu-clock", },
- { C(SOFTWARE, SW_TASK_CLOCK), "task-clock", },
- { C(SOFTWARE, SW_PAGE_FAULTS), "page-faults", },
- { C(SOFTWARE, SW_PAGE_FAULTS), "faults", },
- { C(SOFTWARE, SW_PAGE_FAULTS_MIN), "minor-faults", },
- { C(SOFTWARE, SW_PAGE_FAULTS_MAJ), "major-faults", },
- { C(SOFTWARE, SW_CONTEXT_SWITCHES), "context-switches", },
- { C(SOFTWARE, SW_CONTEXT_SWITCHES), "cs", },
- { C(SOFTWARE, SW_CPU_MIGRATIONS), "cpu-migrations", },
- { C(SOFTWARE, SW_CPU_MIGRATIONS), "migrations", },
+ { CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
+ { CHW(INSTRUCTIONS), "instructions", "" },
+ { CHW(CACHE_REFERENCES), "cache-references", "" },
+ { CHW(CACHE_MISSES), "cache-misses", "" },
+ { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
+ { CHW(BRANCH_MISSES), "branch-misses", "" },
+ { CHW(BUS_CYCLES), "bus-cycles", "" },
+
+ { CSW(CPU_CLOCK), "cpu-clock", "" },
+ { CSW(TASK_CLOCK), "task-clock", "" },
+ { CSW(PAGE_FAULTS), "page-faults", "faults" },
+ { CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
+ { CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
+ { CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
+ { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
};
#define __PERF_COUNTER_FIELD(config, name) \
@@ -74,26 +70,70 @@ static char *sw_event_names[] = {
#define MAX_ALIASES 8
-static char *hw_cache [][MAX_ALIASES] = {
- { "L1-data" , "l1-d", "l1d" },
- { "L1-instruction" , "l1-i", "l1i" },
- { "L2" , "l2" },
- { "Data-TLB" , "dtlb", "d-tlb" },
- { "Instruction-TLB" , "itlb", "i-tlb" },
- { "Branch" , "bpu" , "btb", "bpc" },
+static char *hw_cache[][MAX_ALIASES] = {
+ { "L1-d$", "l1-d", "l1d", "L1-data", },
+ { "L1-i$", "l1-i", "l1i", "L1-instruction", },
+ { "LLC", "L2" },
+ { "dTLB", "d-tlb", "Data-TLB", },
+ { "iTLB", "i-tlb", "Instruction-TLB", },
+ { "branch", "branches", "bpu", "btb", "bpc", },
};
-static char *hw_cache_op [][MAX_ALIASES] = {
- { "Load" , "read" },
- { "Store" , "write" },
- { "Prefetch" , "speculative-read", "speculative-load" },
+static char *hw_cache_op[][MAX_ALIASES] = {
+ { "load", "loads", "read", },
+ { "store", "stores", "write", },
+ { "prefetch", "prefetches", "speculative-read", "speculative-load", },
};
-static char *hw_cache_result [][MAX_ALIASES] = {
- { "Reference" , "ops", "access" },
- { "Miss" },
+static char *hw_cache_result[][MAX_ALIASES] = {
+ { "refs", "Reference", "ops", "access", },
+ { "misses", "miss", },
};
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define CACHE_READ (1 << C(OP_READ))
+#define CACHE_WRITE (1 << C(OP_WRITE))
+#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
+#define COP(x) (1 << x)
+
+/*
+ * cache operation stat
+ * L1I : Read and prefetch only
+ * ITLB and BPU : Read-only
+ */
+static unsigned long hw_cache_stat[C(MAX)] = {
+ [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
+ [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(ITLB)] = (CACHE_READ),
+ [C(BPU)] = (CACHE_READ),
+};
+
+static int is_cache_op_valid(u8 cache_type, u8 cache_op)
+{
+ if (hw_cache_stat[cache_type] & COP(cache_op))
+ return 1; /* valid */
+ else
+ return 0; /* invalid */
+}
+
+static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
+{
+ static char name[50];
+
+ if (cache_result) {
+ sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
+ hw_cache_op[cache_op][0],
+ hw_cache_result[cache_result][0]);
+ } else {
+ sprintf(name, "%s-%s", hw_cache[cache_type][0],
+ hw_cache_op[cache_op][1]);
+ }
+
+ return name;
+}
+
char *event_name(int counter)
{
u64 config = attrs[counter].config;
@@ -113,7 +153,6 @@ char *event_name(int counter)
case PERF_TYPE_HW_CACHE: {
u8 cache_type, cache_op, cache_result;
- static char name[100];
cache_type = (config >> 0) & 0xff;
if (cache_type > PERF_COUNT_HW_CACHE_MAX)
@@ -127,12 +166,10 @@ char *event_name(int counter)
if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
return "unknown-ext-hardware-cache-result";
- sprintf(name, "%s-Cache-%s-%ses",
- hw_cache[cache_type][0],
- hw_cache_op[cache_op][0],
- hw_cache_result[cache_result][0]);
+ if (!is_cache_op_valid(cache_type, cache_op))
+ return "invalid-cache";
- return name;
+ return event_cache_name(cache_type, cache_op, cache_result);
}
case PERF_TYPE_SOFTWARE:
@@ -163,7 +200,8 @@ static int parse_aliases(const char *str, char *names[][MAX_ALIASES], int size)
return -1;
}
-static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *attr)
+static int
+parse_generic_hw_symbols(const char *str, struct perf_counter_attr *attr)
{
int cache_type = -1, cache_op = 0, cache_result = 0;
@@ -182,6 +220,9 @@ static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *a
if (cache_op == -1)
cache_op = PERF_COUNT_HW_CACHE_OP_READ;
+ if (!is_cache_op_valid(cache_type, cache_op))
+ return -EINVAL;
+
cache_result = parse_aliases(str, hw_cache_result,
PERF_COUNT_HW_CACHE_RESULT_MAX);
/*
@@ -196,6 +237,19 @@ static int parse_generic_hw_symbols(const char *str, struct perf_counter_attr *a
return 0;
}
+static int check_events(const char *str, unsigned int i)
+{
+ if (!strncmp(str, event_symbols[i].symbol,
+ strlen(event_symbols[i].symbol)))
+ return 1;
+
+ if (strlen(event_symbols[i].alias))
+ if (!strncmp(str, event_symbols[i].alias,
+ strlen(event_symbols[i].alias)))
+ return 1;
+ return 0;
+}
+
/*
* Each event can have multiple symbolic names.
* Symbolic names are (almost) exactly matched.
@@ -235,9 +289,7 @@ static int parse_event_symbols(const char *str, struct perf_counter_attr *attr)
}
for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
- if (!strncmp(str, event_symbols[i].symbol,
- strlen(event_symbols[i].symbol))) {
-
+ if (check_events(str, i)) {
attr->type = event_symbols[i].type;
attr->config = event_symbols[i].config;
@@ -289,6 +341,7 @@ void print_events(void)
{
struct event_symbol *syms = event_symbols;
unsigned int i, type, prev_type = -1;
+ char name[40];
fprintf(stderr, "\n");
fprintf(stderr, "List of pre-defined events (to be used in -e):\n");
@@ -301,14 +354,18 @@ void print_events(void)
if (type != prev_type)
fprintf(stderr, "\n");
- fprintf(stderr, " %-30s [%s]\n", syms->symbol,
+ if (strlen(syms->alias))
+ sprintf(name, "%s OR %s", syms->symbol, syms->alias);
+ else
+ strcpy(name, syms->symbol);
+ fprintf(stderr, " %-40s [%s]\n", name,
event_type_descriptors[type]);
prev_type = type;
}
fprintf(stderr, "\n");
- fprintf(stderr, " %-30s [raw hardware event descriptor]\n",
+ fprintf(stderr, " %-40s [raw hardware event descriptor]\n",
"rNNN");
fprintf(stderr, "\n");
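
The parser above only resolves cache_type/cache_op/cache_result from the alias tables; the attr->config assignment itself falls outside the quoted hunks. Assuming the generic hw-cache packing from perf_counter.h (one byte per field, type in the low byte, matching the decode in event_name()), the encoding looks like this; the helper name and include are illustrative:

    #include "../perf.h"	/* assumed: brings in u8/u64 and the perf_counter.h enums */

    /* e.g. "dTLB-load-misses" would encode as
     *	cache_config(PERF_COUNT_HW_CACHE_DTLB,
     *		     PERF_COUNT_HW_CACHE_OP_READ,
     *		     PERF_COUNT_HW_CACHE_RESULT_MISS)
     */
    static u64 cache_config(u8 type, u8 op, u8 result)
    {
            return (u64)type | ((u64)op << 8) | ((u64)result << 16);
    }
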
diff --git a/tools/perf/util/run-command.c b/tools/perf/util/run-command.c
index b2f5e854f40a..a3935343091a 100644
--- a/tools/perf/util/run-command.c
+++ b/tools/perf/util/run-command.c
@@ -65,7 +65,6 @@ int start_command(struct child_process *cmd)
cmd->err = fderr[0];
}
-#ifndef __MINGW32__
fflush(NULL);
cmd->pid = fork();
if (!cmd->pid) {
@@ -118,71 +117,6 @@ int start_command(struct child_process *cmd)
}
exit(127);
}
-#else
- int s0 = -1, s1 = -1, s2 = -1; /* backups of stdin, stdout, stderr */
- const char **sargv = cmd->argv;
- char **env = environ;
-
- if (cmd->no_stdin) {
- s0 = dup(0);
- dup_devnull(0);
- } else if (need_in) {
- s0 = dup(0);
- dup2(fdin[0], 0);
- } else if (cmd->in) {
- s0 = dup(0);
- dup2(cmd->in, 0);
- }
-
- if (cmd->no_stderr) {
- s2 = dup(2);
- dup_devnull(2);
- } else if (need_err) {
- s2 = dup(2);
- dup2(fderr[1], 2);
- }
-
- if (cmd->no_stdout) {
- s1 = dup(1);
- dup_devnull(1);
- } else if (cmd->stdout_to_stderr) {
- s1 = dup(1);
- dup2(2, 1);
- } else if (need_out) {
- s1 = dup(1);
- dup2(fdout[1], 1);
- } else if (cmd->out > 1) {
- s1 = dup(1);
- dup2(cmd->out, 1);
- }
-
- if (cmd->dir)
- die("chdir in start_command() not implemented");
- if (cmd->env) {
- env = copy_environ();
- for (; *cmd->env; cmd->env++)
- env = env_setenv(env, *cmd->env);
- }
-
- if (cmd->perf_cmd) {
- cmd->argv = prepare_perf_cmd(cmd->argv);
- }
-
- cmd->pid = mingw_spawnvpe(cmd->argv[0], cmd->argv, env);
-
- if (cmd->env)
- free_environ(env);
- if (cmd->perf_cmd)
- free(cmd->argv);
-
- cmd->argv = sargv;
- if (s0 >= 0)
- dup2(s0, 0), close(s0);
- if (s1 >= 0)
- dup2(s1, 1), close(s1);
- if (s2 >= 0)
- dup2(s2, 2), close(s2);
-#endif
if (cmd->pid < 0) {
int err = errno;
@@ -288,14 +222,6 @@ int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const
return run_command(&cmd);
}
-#ifdef __MINGW32__
-static __stdcall unsigned run_thread(void *data)
-{
- struct async *async = data;
- return async->proc(async->fd_for_proc, async->data);
-}
-#endif
-
int start_async(struct async *async)
{
int pipe_out[2];
@@ -304,7 +230,6 @@ int start_async(struct async *async)
return error("cannot create pipe: %s", strerror(errno));
async->out = pipe_out[0];
-#ifndef __MINGW32__
/* Flush stdio before fork() to avoid cloning buffers */
fflush(NULL);
@@ -319,33 +244,17 @@ int start_async(struct async *async)
exit(!!async->proc(pipe_out[1], async->data));
}
close(pipe_out[1]);
-#else
- async->fd_for_proc = pipe_out[1];
- async->tid = (HANDLE) _beginthreadex(NULL, 0, run_thread, async, 0, NULL);
- if (!async->tid) {
- error("cannot create thread: %s", strerror(errno));
- close_pair(pipe_out);
- return -1;
- }
-#endif
+
return 0;
}
int finish_async(struct async *async)
{
-#ifndef __MINGW32__
int ret = 0;
if (wait_or_whine(async->pid))
ret = error("waitpid (async) failed");
-#else
- DWORD ret = 0;
- if (WaitForSingleObject(async->tid, INFINITE) != WAIT_OBJECT_0)
- ret = error("waiting for thread failed: %lu", GetLastError());
- else if (!GetExitCodeThread(async->tid, &ret))
- ret = error("cannot get thread exit code: %lu", GetLastError());
- CloseHandle(async->tid);
-#endif
+
return ret;
}
diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h
index 328289f23669..cc1837deba88 100644
--- a/tools/perf/util/run-command.h
+++ b/tools/perf/util/run-command.h
@@ -79,12 +79,7 @@ struct async {
int (*proc)(int fd, void *data);
void *data;
int out; /* caller reads from here and closes it */
-#ifndef __MINGW32__
pid_t pid;
-#else
- HANDLE tid;
- int fd_for_proc;
-#endif
};
int start_async(struct async *async);
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index eaba09306802..464e7ca898cf 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -259,7 +259,7 @@ size_t strbuf_fread(struct strbuf *sb, size_t size, FILE *f)
res = fread(sb->buf + sb->len, 1, size, f);
if (res > 0)
strbuf_setlen(sb, sb->len + res);
- else if (res < 0 && oldalloc == 0)
+ else if (oldalloc == 0)
strbuf_release(sb);
return res;
}
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
index 37b03255b425..3dca2f654cd0 100644
--- a/tools/perf/util/string.h
+++ b/tools/perf/util/string.h
@@ -1,7 +1,7 @@
#ifndef _PERF_STRING_H_
#define _PERF_STRING_H_
-#include "../types.h"
+#include "types.h"
int hex2u64(const char *ptr, u64 *val);
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
new file mode 100644
index 000000000000..025a78edfffe
--- /dev/null
+++ b/tools/perf/util/strlist.c
@@ -0,0 +1,184 @@
+/*
+ * (c) 2009 Arnaldo Carvalho de Melo <[email protected]>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include "strlist.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static struct str_node *str_node__new(const char *s, bool dupstr)
+{
+ struct str_node *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ if (dupstr) {
+ s = strdup(s);
+ if (s == NULL)
+ goto out_delete;
+ }
+ self->s = s;
+ }
+
+ return self;
+
+out_delete:
+ free(self);
+ return NULL;
+}
+
+static void str_node__delete(struct str_node *self, bool dupstr)
+{
+ if (dupstr)
+ free((void *)self->s);
+ free(self);
+}
+
+int strlist__add(struct strlist *self, const char *new_entry)
+{
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+ struct str_node *sn;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, new_entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ sn = str_node__new(new_entry, self->dupstr);
+ if (sn == NULL)
+ return -ENOMEM;
+
+ rb_link_node(&sn->rb_node, parent, p);
+ rb_insert_color(&sn->rb_node, &self->entries);
+
+ return 0;
+}
+
+int strlist__load(struct strlist *self, const char *filename)
+{
+ char entry[1024];
+ int err;
+ FILE *fp = fopen(filename, "r");
+
+ if (fp == NULL)
+ return errno;
+
+ while (fgets(entry, sizeof(entry), fp) != NULL) {
+ const size_t len = strlen(entry);
+
+ if (len == 0)
+ continue;
+ entry[len - 1] = '\0';
+
+ err = strlist__add(self, entry);
+ if (err != 0)
+ goto out;
+ }
+
+ err = 0;
+out:
+ fclose(fp);
+ return err;
+}
+
+void strlist__remove(struct strlist *self, struct str_node *sn)
+{
+ rb_erase(&sn->rb_node, &self->entries);
+ str_node__delete(sn, self->dupstr);
+}
+
+bool strlist__has_entry(struct strlist *self, const char *entry)
+{
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ struct str_node *sn;
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return true;
+ }
+
+ return false;
+}
+
+static int strlist__parse_list_entry(struct strlist *self, const char *s)
+{
+ if (strncmp(s, "file://", 7) == 0)
+ return strlist__load(self, s + 7);
+
+ return strlist__add(self, s);
+}
+
+int strlist__parse_list(struct strlist *self, const char *s)
+{
+ char *sep;
+ int err;
+
+ while ((sep = strchr(s, ',')) != NULL) {
+ *sep = '\0';
+ err = strlist__parse_list_entry(self, s);
+ *sep = ',';
+ if (err != 0)
+ return err;
+ s = sep + 1;
+ }
+
+ return *s ? strlist__parse_list_entry(self, s) : 0;
+}
+
+struct strlist *strlist__new(bool dupstr, const char *slist)
+{
+ struct strlist *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ self->entries = RB_ROOT;
+ self->dupstr = dupstr;
+ if (slist && strlist__parse_list(self, slist) != 0)
+ goto out_error;
+ }
+
+ return self;
+out_error:
+ free(self);
+ return NULL;
+}
+
+void strlist__delete(struct strlist *self)
+{
+ if (self != NULL) {
+ struct str_node *pos;
+ struct rb_node *next = rb_first(&self->entries);
+
+ while (next) {
+ pos = rb_entry(next, struct str_node, rb_node);
+ next = rb_next(&pos->rb_node);
+ strlist__remove(self, pos);
+ }
+ self->entries = RB_ROOT;
+ free(self);
+ }
+}
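
A brief usage sketch (illustration only; the list contents are arbitrary) of how builtin-report's new -d/-C/-S options drive this: a comma-separated list, where a file://PATH entry loads one entry per line:

    #include <stdbool.h>
    #include "strlist.h"

    static bool sample_in_dsos(const char *dso_name)
    {
            /* arbitrary example entries; dupstr=true so the strings are copied */
            struct strlist *dsos = strlist__new(true, "[kernel],libc.so.6");
            bool keep;

            if (dsos == NULL)
                    return true;            /* no filter: keep everything */

            keep = strlist__has_entry(dsos, dso_name);
            strlist__delete(dsos);
            return keep;
    }
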
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
new file mode 100644
index 000000000000..2fb117fb4b67
--- /dev/null
+++ b/tools/perf/util/strlist.h
@@ -0,0 +1,32 @@
+#ifndef STRLIST_H_
+#define STRLIST_H_
+
+#include "rbtree.h"
+#include <stdbool.h>
+
+struct str_node {
+ struct rb_node rb_node;
+ const char *s;
+};
+
+struct strlist {
+ struct rb_root entries;
+ bool dupstr;
+};
+
+struct strlist *strlist__new(bool dupstr, const char *slist);
+void strlist__delete(struct strlist *self);
+
+void strlist__remove(struct strlist *self, struct str_node *sn);
+int strlist__load(struct strlist *self, const char *filename);
+int strlist__add(struct strlist *self, const char *str);
+
+bool strlist__has_entry(struct strlist *self, const char *entry);
+
+static inline bool strlist__empty(const struct strlist *self)
+{
+ return rb_first(&self->entries) == NULL;
+}
+
+int strlist__parse_list(struct strlist *self, const char *s);
+#endif /* STRLIST_H_ */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 86e14375e74e..78c2efde01b7 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -520,7 +520,9 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
-
+ self->prelinked = elf_section_by_name(elf, &ehdr, &shdr,
+ ".gnu.prelink_undo",
+ NULL) != NULL;
elf_symtab__for_each_symbol(syms, nr_syms, index, sym) {
struct symbol *f;
u64 obj_start;
@@ -535,7 +537,13 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
gelf_getshdr(sec, &shdr);
obj_start = sym.st_value;
- sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ if (self->prelinked) {
+ if (verbose >= 2)
+ printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n",
+ (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset);
+
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ }
f = symbol__new(sym.st_value, sym.st_size,
elf_sym__name(&sym, symstrs),
@@ -569,6 +577,8 @@ int dso__load(struct dso *self, symbol_filter_t filter, int verbose)
if (!name)
return -1;
+ self->prelinked = 0;
+
if (strncmp(self->name, "/tmp/perf-", 10) == 0)
return dso__load_perf_map(self, filter, verbose);
@@ -629,7 +639,7 @@ int dso__load_kernel(struct dso *self, const char *vmlinux,
if (vmlinux)
err = dso__load_vmlinux(self, vmlinux, filter, verbose);
- if (err)
+ if (err < 0)
err = dso__load_kallsyms(self, filter, verbose);
return err;
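
To make the .gnu.prelink_undo rebasing in dso__load_sym() concrete, a worked example with made-up numbers:

    /* worked example of the adjustment st_value -= sh_addr - sh_offset:
     *	sh_addr   = 0x40000	prelinked load address of the section
     *	sh_offset = 0x3f000	where that section sits in the file
     *	st_value  = 0x41234	prelinked symbol address
     *	adjusted  = 0x41234 - (0x40000 - 0x3f000) = 0x40234
     */
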
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index ea332e56e458..2c48ace8203b 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -2,7 +2,7 @@
#define _PERF_SYMBOL_ 1
#include <linux/types.h>
-#include "../types.h"
+#include "types.h"
#include "list.h"
#include "rbtree.h"
@@ -20,8 +20,9 @@ struct symbol {
struct dso {
struct list_head node;
struct rb_root syms;
- unsigned int sym_priv_size;
struct symbol *(*find_symbol)(struct dso *, u64 ip);
+ unsigned int sym_priv_size;
+ unsigned char prelinked;
char name[0];
};
diff --git a/tools/perf/types.h b/tools/perf/util/types.h
index 5e75f9005940..5e75f9005940 100644
--- a/tools/perf/types.h
+++ b/tools/perf/util/types.h
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index b8cfed776d81..b4be6071c105 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -67,7 +67,6 @@
#include <assert.h>
#include <regex.h>
#include <utime.h>
-#ifndef __MINGW32__
#include <sys/wait.h>
#include <sys/poll.h>
#include <sys/socket.h>
@@ -81,20 +80,6 @@
#include <netdb.h>
#include <pwd.h>
#include <inttypes.h>
-#if defined(__CYGWIN__)
-#undef _XOPEN_SOURCE
-#include <grp.h>
-#define _XOPEN_SOURCE 600
-#include "compat/cygwin.h"
-#else
-#undef _ALL_SOURCE /* AIX 5.3L defines a struct list with _ALL_SOURCE. */
-#include <grp.h>
-#define _ALL_SOURCE 1
-#endif
-#else /* __MINGW32__ */
-/* pull in Windows compatibility stuff */
-#include "compat/mingw.h"
-#endif /* __MINGW32__ */
#ifndef NO_ICONV
#include <iconv.h>