Diffstat (limited to 'arch/x86/kernel/jump_label.c')
-rw-r--r--  arch/x86/kernel/jump_label.c | 121 ++++++++++++++++++++++++++++-------
1 file changed, 96 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index e631c358f7f4..044053235302 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -35,41 +35,43 @@ static void bug_at(unsigned char *ip, int line)
BUG();
}
-static void __ref __jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type,
- int init)
+static void __jump_label_set_jump_code(struct jump_entry *entry,
+ enum jump_label_type type,
+ union jump_code_union *code,
+ int init)
{
- union jump_code_union jmp;
const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
- const void *expect, *code;
+ const void *expect;
int line;
- jmp.jump = 0xe9;
- jmp.offset = jump_entry_target(entry) -
- (jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
+ code->jump = 0xe9;
+ code->offset = jump_entry_target(entry) -
+ (jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
- if (type == JUMP_LABEL_JMP) {
- if (init) {
- expect = default_nop; line = __LINE__;
- } else {
- expect = ideal_nop; line = __LINE__;
- }
-
- code = &jmp.code;
+ if (init) {
+ expect = default_nop; line = __LINE__;
+ } else if (type == JUMP_LABEL_JMP) {
+ expect = ideal_nop; line = __LINE__;
} else {
- if (init) {
- expect = default_nop; line = __LINE__;
- } else {
- expect = &jmp.code; line = __LINE__;
- }
-
- code = ideal_nop;
+ expect = code->code; line = __LINE__;
}
if (memcmp((void *)jump_entry_code(entry), expect, JUMP_LABEL_NOP_SIZE))
bug_at((void *)jump_entry_code(entry), line);
+ if (type == JUMP_LABEL_NOP)
+ memcpy(code, ideal_nop, JUMP_LABEL_NOP_SIZE);
+}
+
+static void __ref __jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ int init)
+{
+ union jump_code_union code;
+
+ __jump_label_set_jump_code(entry, type, &code, init);
+
/*
* As long as only a single processor is running and the code is still
* not marked as RO, text_poke_early() can be used; Checking that
@@ -82,12 +84,12 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
* always nop being the 'currently valid' instruction
*/
if (init || system_state == SYSTEM_BOOTING) {
- text_poke_early((void *)jump_entry_code(entry), code,
+ text_poke_early((void *)jump_entry_code(entry), &code,
JUMP_LABEL_NOP_SIZE);
return;
}
- text_poke_bp((void *)jump_entry_code(entry), code, JUMP_LABEL_NOP_SIZE,
+ text_poke_bp((void *)jump_entry_code(entry), &code, JUMP_LABEL_NOP_SIZE,
(void *)jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
}
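
The five bytes that __jump_label_set_jump_code() builds are a relative jump: opcode 0xe9 followed by a signed 32-bit displacement measured from the end of the instruction. Below is a minimal user-space sketch of that encoding; the union mirrors the jump_code_union defined near the top of this file, and the addresses are invented for illustration.

#include <stdio.h>

#define JUMP_LABEL_NOP_SIZE 5

/* Mirrors the jump_code_union used by the code above. */
union jump_code_union {
	char code[JUMP_LABEL_NOP_SIZE];
	struct {
		char jump;	/* 0xe9: JMP rel32 opcode */
		int offset;	/* displacement from the next instruction */
	} __attribute__((packed));
};

int main(void)
{
	/* Stand-ins for jump_entry_code() and jump_entry_target(). */
	unsigned long code_addr   = 0x1000;
	unsigned long target_addr = 0x1234;
	union jump_code_union code;
	int i;

	code.jump   = 0xe9;
	/* rel32 is relative to the end of the 5-byte instruction. */
	code.offset = target_addr - (code_addr + JUMP_LABEL_NOP_SIZE);

	for (i = 0; i < JUMP_LABEL_NOP_SIZE; i++)
		printf("%02x ", (unsigned char)code.code[i]);
	printf("\n");	/* prints: e9 2f 02 00 00 */
	return 0;
}
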
@@ -99,6 +101,75 @@ void arch_jump_label_transform(struct jump_entry *entry,
mutex_unlock(&text_mutex);
}
+#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
+static struct text_poke_loc tp_vec[TP_VEC_MAX];
+static int tp_vec_nr;
+
+bool arch_jump_label_transform_queue(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ struct text_poke_loc *tp;
+ void *entry_code;
+
+ if (system_state == SYSTEM_BOOTING) {
+ /*
+ * Fall back to the non-batching mode.
+ */
+ arch_jump_label_transform(entry, type);
+ return true;
+ }
+
+ /*
+ * No more space in the vector; tell the upper layer to apply
+ * the queue before continuing.
+ */
+ if (tp_vec_nr == TP_VEC_MAX)
+ return false;
+
+ tp = &tp_vec[tp_vec_nr];
+
+ entry_code = (void *)jump_entry_code(entry);
+
+ /*
+ * The INT3 handler does a bsearch in the queue, so the entries must be
+ * sorted by address. We can survive an unsorted entry by rejecting it,
+ * which forces the generic jump_label code to apply the queue and retry.
+ * Warn once to draw attention to the unsorted case: it should not happen,
+ * but in the worst case we merely fall back to the same behavior as the
+ * non-batching mode, with some extra overhead.
+ */
+ if (tp_vec_nr > 0) {
+ int prev = tp_vec_nr - 1;
+ struct text_poke_loc *prev_tp = &tp_vec[prev];
+
+ if (WARN_ON_ONCE(prev_tp->addr > entry_code))
+ return false;
+ }
+
+ __jump_label_set_jump_code(entry, type,
+ (union jump_code_union *) &tp->opcode, 0);
+
+ tp->addr = entry_code;
+ tp->detour = entry_code + JUMP_LABEL_NOP_SIZE;
+ tp->len = JUMP_LABEL_NOP_SIZE;
+
+ tp_vec_nr++;
+
+ return true;
+}
+
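
For reference, the tp->addr, tp->detour, tp->len and tp->opcode fields filled in above belong to struct text_poke_loc, which the companion text_poke_bp_batch() patch introduces on the alternatives side. The sketch below is reconstructed from how the structure is used in this hunk, not copied from the header, so field order and exact types are approximate.

/* Sketch only: layout inferred from the usage above. */
#define POKE_MAX_OPCODE_SIZE	5	/* room for a 5-byte JMP or NOP */

struct text_poke_loc {
	void *detour;				/* where the INT3 handler resumes */
	void *addr;				/* instruction address being patched */
	size_t len;				/* number of bytes to patch */
	char opcode[POKE_MAX_OPCODE_SIZE];	/* replacement bytes */
};

With those (assumed) sizes on x86-64, each entry pads out to 32 bytes, so TP_VEC_MAX works out to roughly PAGE_SIZE / 32 = 128 queued updates per flush; treat the arithmetic as illustrative, since it depends on the real layout.
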
+void arch_jump_label_transform_apply(void)
+{
+ if (!tp_vec_nr)
+ return;
+
+ mutex_lock(&text_mutex);
+ text_poke_bp_batch(tp_vec, tp_vec_nr);
+ mutex_unlock(&text_mutex);
+
+ tp_vec_nr = 0;
+}
+
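
A sketch of how the generic jump-label layer is expected to drive this queue/apply pair: queue entries until the vector fills or an out-of-order entry is rejected, flush, retry the rejected entry, and flush once more at the end. It is modeled on the __jump_label_update() change that accompanies this patch in kernel/jump_label.c rather than copied from it, and jump_label_type() stands in for the generic helper that returns an entry's branch direction.

/* Illustrative caller loop, not the actual kernel/jump_label.c code. */
static void update_entries(struct jump_entry *entry, struct jump_entry *stop)
{
	for (; entry < stop; entry++) {
		enum jump_label_type type = jump_label_type(entry);

		if (!arch_jump_label_transform_queue(entry, type)) {
			/* Vector full or out of order: flush, then retry. */
			arch_jump_label_transform_apply();
			arch_jump_label_transform_queue(entry, type);
		}
	}

	/* Flush whatever is still queued. */
	arch_jump_label_transform_apply();
}
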
static enum {
JL_STATE_START,
JL_STATE_NO_UPDATE,