author	Sebastian Ott <[email protected]>	2017-10-12 13:57:26 +0200
committer	Martin Schwidefsky <[email protected]>	2017-10-18 14:11:23 +0200
commit	0dcd91a9e6cc0a401416bbdd34b0a255cde0aee1 (patch)
tree	9a095afe44969df5b0a6ad4ee8a251257db78645
parent	94158e544fd60c6a94af348790dae76578ed8dae (diff)
s390/debug: only write data once
debug_event_common memsets the active debug entry with zeros to
prevent stale data leakage. This is overwritten with the actual debug
data in the next step. Only write zeros to that part of the debug
entry that's not used by new debug data.

Micro benchmarks show a 2-10% reduction of cpu cycles with this
approach.

Signed-off-by: Sebastian Ott <[email protected]>
Acked-by: Michael Holzheu <[email protected]>
Signed-off-by: Martin Schwidefsky <[email protected]>
-rw-r--r--	arch/s390/kernel/debug.c	6
1 file changed, 4 insertions(+), 2 deletions(-)
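The change is easiest to see side by side. Below is a minimal standalone
sketch, not kernel code: fill_entry_before, fill_entry_after, min_size and
the buf_size parameter are hypothetical names chosen for illustration. The
old pattern writes the first min(len, buf_size) bytes twice; the new pattern
writes every byte exactly once and zeroes only the tail that the new data
leaves uncovered.

#include <string.h>

/* Smaller of two sizes; stands in for the kernel's min() used in the diff. */
static size_t min_size(size_t a, size_t b)
{
	return a < b ? a : b;
}

/* Old pattern: zero the whole entry, then overwrite the first
 * min(len, buf_size) bytes, so those bytes are written twice. */
static void fill_entry_before(void *entry, const void *buf,
			      size_t len, size_t buf_size)
{
	memset(entry, 0, buf_size);
	memcpy(entry, buf, min_size(len, buf_size));
}

/* New pattern: copy first, then zero only the unused tail (when
 * len < buf_size) so stale data from a reused entry cannot leak;
 * every byte is written exactly once. */
static void fill_entry_after(void *entry, const void *buf,
			     size_t len, size_t buf_size)
{
	memcpy(entry, buf, min_size(len, buf_size));
	if (len < buf_size)
		memset((char *)entry + len, 0, buf_size - len);
}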
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index df916738fbd8..2df679d75454 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -902,8 +902,9 @@ debug_entry_t *debug_event_common(debug_info_t *id, int level, const void *buf,
}
do {
active = get_active_entry(id);
- memset(DEBUG_DATA(active), 0, id->buf_size);
memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
+ if (len < id->buf_size)
+ memset((DEBUG_DATA(active)) + len, 0, id->buf_size - len);
debug_finish_entry(id, active, level, 0);
len -= id->buf_size;
buf += id->buf_size;
@@ -934,8 +935,9 @@ debug_entry_t *debug_exception_common(debug_info_t *id, int level,
}
do {
active = get_active_entry(id);
- memset(DEBUG_DATA(active), 0, id->buf_size);
memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
+ if (len < id->buf_size)
+ memset((DEBUG_DATA(active)) + len, 0, id->buf_size - len);
debug_finish_entry(id, active, level, len <= id->buf_size);
len -= id->buf_size;
buf += id->buf_size;
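A quick way to convince oneself the two patterns are equivalent, continuing
the standalone sketch above (again not kernel code, and assuming it is
compiled together with that sketch, which pulls in string.h): pre-fill the
entry with stale bytes, run both variants, and compare. Any stale byte past
len is zeroed either way, and for len >= buf_size both variants behave
identically since the tail memset is skipped.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	char a[16], b[16];
	const char data[] = "payload";	/* 8 bytes incl. NUL, < buf_size */

	/* Model a reused debug entry still holding old contents. */
	memset(a, 0xAA, sizeof(a));
	memset(b, 0xAA, sizeof(b));

	fill_entry_before(a, data, sizeof(data), sizeof(a));
	fill_entry_after(b, data, sizeof(data), sizeof(b));
	assert(memcmp(a, b, sizeof(a)) == 0);

	puts("old and new fill patterns agree");
	return 0;
}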