0bd3a173d7
Currently local_bh_disable() is out-of-line for no apparent reason.
So inline it to save a few cycles on call/return nonsense, the
function body is a single add on x86 (a few loads and store extra
on load/store archs).

Also expose two new local_bh functions:

  __local_bh_{dis,en}able_ip(unsigned long ip, unsigned int cnt);

Which implement the actual local_bh_{dis,en}able() behaviour.

The next patch uses the exposed @cnt argument to optimize bh lock
functions.

With build fixes from Jacob Pan.

Cc: rjw@rjwysocki.net
Cc: rui.zhang@intel.com
Cc: jacob.jun.pan@linux.intel.com
Cc: Mike Galbraith <bitbucket@online.de>
Cc: hpa@zytor.com
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: lenb@kernel.org
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20131119151338.GF3694@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
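As a rough illustration of what the exposed @cnt argument makes possible (this is a hedged sketch, not the actual follow-up patch; the example_* names are hypothetical), a *_lock_bh()-style primitive can fold the softirq-disable and preempt-disable increments into a single preempt_count update, so the fast path does one add instead of two:

/*
 * Illustrative sketch only -- not the follow-up patch, and lockdep
 * annotations are omitted for brevity.  Passing a combined count via
 * @cnt lets one preempt_count_add()/sub() cover both the softirq-disable
 * and the preempt-disable part of a bh-taking lock helper.
 */
#include <linux/kernel.h>	/* _RET_IP_ */
#include <linux/preempt_mask.h>	/* SOFTIRQ_DISABLE_OFFSET, PREEMPT_OFFSET */
#include <linux/spinlock.h>
#include <linux/bh.h>

static inline void example_spin_lock_bh(spinlock_t *lock)
{
	/* One add disables softirqs and preemption together. */
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_DISABLE_OFFSET + PREEMPT_OFFSET);
	do_raw_spin_lock(&lock->rlock);
}

static inline void example_spin_unlock_bh(spinlock_t *lock)
{
	do_raw_spin_unlock(&lock->rlock);
	/* Matching single subtract; pending softirqs may run here. */
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_DISABLE_OFFSET + PREEMPT_OFFSET);
}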
#ifndef _LINUX_BH_H
#define _LINUX_BH_H

#include <linux/preempt.h>
#include <linux/preempt_mask.h>

#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	preempt_count_add(cnt);
	barrier();
}
#endif

static inline void local_bh_disable(void)
{
	__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}

extern void _local_bh_enable(void);
extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);

static inline void local_bh_enable_ip(unsigned long ip)
{
	__local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET);
}

static inline void local_bh_enable(void)
{
	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}

#endif /* _LINUX_BH_H */
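For context, a minimal usage sketch of the unchanged public interface (illustrative only, not part of the commit; the example_* names are made up): local_bh_disable()/local_bh_enable() keep softirqs from running on the local CPU while per-CPU state shared with softirq context is updated.

#include <linux/bh.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_bh_count);	/* hypothetical */

static void example_bh_protected_update(void)
{
	local_bh_disable();			/* softirqs held off on this CPU */
	__this_cpu_inc(example_bh_count);	/* safe vs. softirq-context users */
	local_bh_enable();			/* may run pending softirqs now */
}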