diff options
Diffstat (limited to 'kernel/trace/blktrace.c')
 -rw-r--r-- kernel/trace/blktrace.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 1183c88634aa..af68a67179b4 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -34,7 +34,7 @@ static struct trace_array *blk_tr;
 static bool blk_tracer_enabled __read_mostly;
 
 static LIST_HEAD(running_trace_list);
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
 
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_BLK_OPT_CLASSIC	0x1
@@ -121,12 +121,12 @@ static void trace_note_tsk(struct task_struct *tsk)
 	struct blk_trace *bt;
 
 	tsk->btrace_seq = blktrace_seq;
-	spin_lock_irqsave(&running_trace_lock, flags);
+	raw_spin_lock_irqsave(&running_trace_lock, flags);
 	list_for_each_entry(bt, &running_trace_list, running_list) {
 		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
 			   sizeof(tsk->comm), 0);
 	}
-	spin_unlock_irqrestore(&running_trace_lock, flags);
+	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
 }
 
 static void trace_note_time(struct blk_trace *bt)
@@ -666,9 +666,9 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
 			blktrace_seq++;
 			smp_mb();
 			bt->trace_state = Blktrace_running;
-			spin_lock_irq(&running_trace_lock);
+			raw_spin_lock_irq(&running_trace_lock);
 			list_add(&bt->running_list, &running_trace_list);
-			spin_unlock_irq(&running_trace_lock);
+			raw_spin_unlock_irq(&running_trace_lock);
 
 			trace_note_time(bt);
 			ret = 0;
@@ -676,9 +676,9 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
 	} else {
 		if (bt->trace_state == Blktrace_running) {
 			bt->trace_state = Blktrace_stopped;
-			spin_lock_irq(&running_trace_lock);
+			raw_spin_lock_irq(&running_trace_lock);
 			list_del_init(&bt->running_list);
-			spin_unlock_irq(&running_trace_lock);
+			raw_spin_unlock_irq(&running_trace_lock);
 			relay_flush(bt->rchan);
 			ret = 0;
 		}
@@ -1045,7 +1045,7 @@ static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
 	}
 
 	r.device_from = cpu_to_be32(dev);
-	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
+	r.device_to   = cpu_to_be32(disk_devt(rq->q->disk));
 	r.sector_from = cpu_to_be64(from);
 
 	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
@@ -1608,9 +1608,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
 
 	if (bt->trace_state == Blktrace_running) {
 		bt->trace_state = Blktrace_stopped;
-		spin_lock_irq(&running_trace_lock);
+		raw_spin_lock_irq(&running_trace_lock);
 		list_del_init(&bt->running_list);
-		spin_unlock_irq(&running_trace_lock);
+		raw_spin_unlock_irq(&running_trace_lock);
 		relay_flush(bt->rchan);
 	}