Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/Makefile                   38
-rw-r--r--  arch/tile/kernel/asm-offsets.c              84
-rw-r--r--  arch/tile/kernel/backtrace.c               683
-rw-r--r--  arch/tile/kernel/compat.c                  117
-rw-r--r--  arch/tile/kernel/compat_signal.c           172
-rw-r--r--  arch/tile/kernel/early_printk.c             75
-rw-r--r--  arch/tile/kernel/entry.S                    64
-rw-r--r--  arch/tile/kernel/ftrace.c                  239
-rw-r--r--  arch/tile/kernel/hardwall.c               1096
-rw-r--r--  arch/tile/kernel/head_32.S                 183
-rw-r--r--  arch/tile/kernel/head_64.S                 279
-rw-r--r--  arch/tile/kernel/hvglue.S                   76
-rw-r--r--  arch/tile/kernel/hvglue_trace.c            270
-rw-r--r--  arch/tile/kernel/intvec_32.S              1906
-rw-r--r--  arch/tile/kernel/intvec_64.S              1564
-rw-r--r--  arch/tile/kernel/irq.c                     280
-rw-r--r--  arch/tile/kernel/jump_label.c               62
-rw-r--r--  arch/tile/kernel/kgdb.c                    497
-rw-r--r--  arch/tile/kernel/kprobes.c                 527
-rw-r--r--  arch/tile/kernel/machine_kexec.c           298
-rw-r--r--  arch/tile/kernel/mcount_64.S               211
-rw-r--r--  arch/tile/kernel/messaging.c               115
-rw-r--r--  arch/tile/kernel/module.c                  231
-rw-r--r--  arch/tile/kernel/pci-dma.c                 607
-rw-r--r--  arch/tile/kernel/pci.c                     592
-rw-r--r--  arch/tile/kernel/pci_gx.c                 1592
-rw-r--r--  arch/tile/kernel/perf_event.c             1005
-rw-r--r--  arch/tile/kernel/pmc.c                     118
-rw-r--r--  arch/tile/kernel/proc.c                    160
-rw-r--r--  arch/tile/kernel/process.c                 659
-rw-r--r--  arch/tile/kernel/ptrace.c                  316
-rw-r--r--  arch/tile/kernel/reboot.c                   51
-rw-r--r--  arch/tile/kernel/regs_32.S                 145
-rw-r--r--  arch/tile/kernel/regs_64.S                 145
-rw-r--r--  arch/tile/kernel/relocate_kernel_32.S      269
-rw-r--r--  arch/tile/kernel/relocate_kernel_64.S      263
-rw-r--r--  arch/tile/kernel/setup.c                  1743
-rw-r--r--  arch/tile/kernel/signal.c                  411
-rw-r--r--  arch/tile/kernel/single_step.c             786
-rw-r--r--  arch/tile/kernel/smp.c                     287
-rw-r--r--  arch/tile/kernel/smpboot.c                 269
-rw-r--r--  arch/tile/kernel/stack.c                   539
-rw-r--r--  arch/tile/kernel/sys.c                     130
-rw-r--r--  arch/tile/kernel/sysfs.c                   266
-rw-r--r--  arch/tile/kernel/tile-desc_32.c           2605
-rw-r--r--  arch/tile/kernel/tile-desc_64.c           2218
-rw-r--r--  arch/tile/kernel/time.c                    306
-rw-r--r--  arch/tile/kernel/tlb.c                     104
-rw-r--r--  arch/tile/kernel/traps.c                   421
-rw-r--r--  arch/tile/kernel/unaligned.c              1603
-rw-r--r--  arch/tile/kernel/usb.c                      71
-rw-r--r--  arch/tile/kernel/vdso.c                    197
-rw-r--r--  arch/tile/kernel/vdso/Makefile             117
-rw-r--r--  arch/tile/kernel/vdso/vdso.S                28
-rw-r--r--  arch/tile/kernel/vdso/vdso.lds.S            89
-rw-r--r--  arch/tile/kernel/vdso/vdso32.S              28
-rw-r--r--  arch/tile/kernel/vdso/vgettimeofday.c      198
-rw-r--r--  arch/tile/kernel/vdso/vrt_sigreturn.S       30
-rw-r--r--  arch/tile/kernel/vmlinux.lds.S             105
59 files changed, 0 insertions, 27540 deletions
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
deleted file mode 100644
index 3e43d78731a8..000000000000
--- a/arch/tile/kernel/Makefile
+++ /dev/null
@@ -1,38 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the Linux/TILE kernel.
-#
-
-extra-y := vmlinux.lds head_$(BITS).o
-obj-y := backtrace.o entry.o hvglue.o irq.o messaging.o \
- pci-dma.o proc.o process.o ptrace.o reboot.o \
- setup.o signal.o single_step.o stack.o sys.o \
- sysfs.o time.o traps.o unaligned.o vdso.o \
- intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
-
-ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_early_printk.o = -pg
-endif
-
-obj-$(CONFIG_HARDWALL) += hardwall.o
-obj-$(CONFIG_COMPAT) += compat.o compat_signal.o
-obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
-obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel_$(BITS).o
-ifdef CONFIG_TILEGX
-obj-$(CONFIG_PCI) += pci_gx.o
-else
-obj-$(CONFIG_PCI) += pci.o
-endif
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o
-obj-$(CONFIG_USE_PMC) += pmc.o
-obj-$(CONFIG_TILE_USB) += usb.o
-obj-$(CONFIG_TILE_HVGLUE_TRACE) += hvglue_trace.o
-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount_64.o
-obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
-
-obj-y += vdso/
diff --git a/arch/tile/kernel/asm-offsets.c b/arch/tile/kernel/asm-offsets.c
deleted file mode 100644
index 375e7c321eef..000000000000
--- a/arch/tile/kernel/asm-offsets.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Generates definitions from c-type structures used by assembly sources.
- */
-
-/* Check for compatible compiler early in the build. */
-#ifdef CONFIG_TILEGX
-# ifndef __tilegx__
-# error Can only build TILE-Gx configurations with tilegx compiler
-# endif
-# ifndef __LP64__
-# error Must not specify -m32 when building the TILE-Gx kernel
-# endif
-#else
-# ifdef __tilegx__
-# error Can not build TILEPro configurations with tilegx compiler
-# endif
-#endif
-
-#include <linux/kbuild.h>
-#include <linux/thread_info.h>
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-#include <linux/ptrace.h>
-#include <hv/hypervisor.h>
-
-void foo(void)
-{
- DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET,
- offsetof(struct single_step_state, buffer));
- DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET,
- offsetof(struct single_step_state, flags));
- DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET,
- offsetof(struct single_step_state, orig_pc));
- DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET,
- offsetof(struct single_step_state, next_pc));
- DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET,
- offsetof(struct single_step_state, branch_next_pc));
- DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET,
- offsetof(struct single_step_state, update_value));
-
- DEFINE(THREAD_INFO_TASK_OFFSET,
- offsetof(struct thread_info, task));
- DEFINE(THREAD_INFO_FLAGS_OFFSET,
- offsetof(struct thread_info, flags));
- DEFINE(THREAD_INFO_STATUS_OFFSET,
- offsetof(struct thread_info, status));
- DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET,
- offsetof(struct thread_info, homecache_cpu));
- DEFINE(THREAD_INFO_PREEMPT_COUNT_OFFSET,
- offsetof(struct thread_info, preempt_count));
- DEFINE(THREAD_INFO_STEP_STATE_OFFSET,
- offsetof(struct thread_info, step_state));
-#ifdef __tilegx__
- DEFINE(THREAD_INFO_UNALIGN_JIT_BASE_OFFSET,
- offsetof(struct thread_info, unalign_jit_base));
- DEFINE(THREAD_INFO_UNALIGN_JIT_TMP_OFFSET,
- offsetof(struct thread_info, unalign_jit_tmp));
-#endif
-
- DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET,
- offsetof(struct task_struct, thread.ksp));
- DEFINE(TASK_STRUCT_THREAD_PC_OFFSET,
- offsetof(struct task_struct, thread.pc));
-
- DEFINE(HV_TOPOLOGY_WIDTH_OFFSET,
- offsetof(HV_Topology, width));
- DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET,
- offsetof(HV_Topology, height));
-
- DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET,
- offsetof(irq_cpustat_t, irq_syscall_count));
-}
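
For reference, each DEFINE() call above works by way of the generic <linux/kbuild.h> trick: the constant is emitted into the compiler's assembly output as a magic "->" marker line, which a kbuild sed script then turns into a #define in the generated asm-offsets.h. A minimal standalone sketch of that mechanism, assuming the generic DEFINE() implementation (the demo struct and symbol name are illustrative, not the kernel's):

    #include <stddef.h>

    /* Stand-in for <linux/kbuild.h>: emit "->SYM value" into the .s
     * output, where the kbuild sed script picks it up. */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    struct demo_thread_info {               /* illustrative layout only */
            unsigned long flags;
            int preempt_count;
    };

    void foo(void)
    {
            DEFINE(DEMO_PREEMPT_COUNT_OFFSET,
                   offsetof(struct demo_thread_info, preempt_count));
    }

Compiling this with "gcc -S" leaves the marker and the numeric offset behind in the assembly text; the DEFINE() itself generates no executable code, which is why foo() is never actually called.
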
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
deleted file mode 100644
index f8b74ca83b92..000000000000
--- a/arch/tile/kernel/backtrace.c
+++ /dev/null
@@ -1,683 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <asm/byteorder.h>
-#include <asm/backtrace.h>
-#include <asm/tile-desc.h>
-#include <arch/abi.h>
-
-#ifdef __tilegx__
-#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE
-#define tile_decoded_instruction tilegx_decoded_instruction
-#define tile_mnemonic tilegx_mnemonic
-#define parse_insn_tile parse_insn_tilegx
-#define TILE_OPC_IRET TILEGX_OPC_IRET
-#define TILE_OPC_ADDI TILEGX_OPC_ADDI
-#define TILE_OPC_ADDLI TILEGX_OPC_ADDLI
-#define TILE_OPC_INFO TILEGX_OPC_INFO
-#define TILE_OPC_INFOL TILEGX_OPC_INFOL
-#define TILE_OPC_JRP TILEGX_OPC_JRP
-#define TILE_OPC_MOVE TILEGX_OPC_MOVE
-#define OPCODE_STORE TILEGX_OPC_ST
-typedef long long bt_int_reg_t;
-#else
-#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE
-#define tile_decoded_instruction tilepro_decoded_instruction
-#define tile_mnemonic tilepro_mnemonic
-#define parse_insn_tile parse_insn_tilepro
-#define TILE_OPC_IRET TILEPRO_OPC_IRET
-#define TILE_OPC_ADDI TILEPRO_OPC_ADDI
-#define TILE_OPC_ADDLI TILEPRO_OPC_ADDLI
-#define TILE_OPC_INFO TILEPRO_OPC_INFO
-#define TILE_OPC_INFOL TILEPRO_OPC_INFOL
-#define TILE_OPC_JRP TILEPRO_OPC_JRP
-#define TILE_OPC_MOVE TILEPRO_OPC_MOVE
-#define OPCODE_STORE TILEPRO_OPC_SW
-typedef int bt_int_reg_t;
-#endif
-
-/* A decoded bundle used for backtracer analysis. */
-struct BacktraceBundle {
- tile_bundle_bits bits;
- int num_insns;
- struct tile_decoded_instruction
- insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
-};
-
-
-/* Locates an instruction inside the given bundle that
- * has the specified mnemonic, and whose first 'num_operands_to_match'
- * operands exactly match those in 'operand_values'.
- */
-static const struct tile_decoded_instruction *find_matching_insn(
- const struct BacktraceBundle *bundle,
- tile_mnemonic mnemonic,
- const int *operand_values,
- int num_operands_to_match)
-{
- int i, j;
- bool match;
-
- for (i = 0; i < bundle->num_insns; i++) {
- const struct tile_decoded_instruction *insn =
- &bundle->insns[i];
-
- if (insn->opcode->mnemonic != mnemonic)
- continue;
-
- match = true;
- for (j = 0; j < num_operands_to_match; j++) {
- if (operand_values[j] != insn->operand_values[j]) {
- match = false;
- break;
- }
- }
-
- if (match)
- return insn;
- }
-
- return NULL;
-}
-
-/* Does this bundle contain an 'iret' instruction? */
-static inline bool bt_has_iret(const struct BacktraceBundle *bundle)
-{
- return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
-}
-
-/* Does this bundle contain an 'addi sp, sp, OFFSET' or
- * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
- */
-static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
-{
- static const int vals[2] = { TREG_SP, TREG_SP };
-
- const struct tile_decoded_instruction *insn =
- find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
- if (insn == NULL)
- insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
-#ifdef __tilegx__
- if (insn == NULL)
- insn = find_matching_insn(bundle, TILEGX_OPC_ADDXLI, vals, 2);
- if (insn == NULL)
- insn = find_matching_insn(bundle, TILEGX_OPC_ADDXI, vals, 2);
-#endif
- if (insn == NULL)
- return false;
-
- *adjust = insn->operand_values[2];
- return true;
-}
-
-/* Does this bundle contain any 'info OP' or 'infol OP'
- * instruction, and if so, what are their OP? Note that OP is interpreted
- * as an unsigned value by this code since that's what the caller wants.
- * Returns the number of info ops found.
- */
-static int bt_get_info_ops(const struct BacktraceBundle *bundle,
- int operands[MAX_INFO_OPS_PER_BUNDLE])
-{
- int num_ops = 0;
- int i;
-
- for (i = 0; i < bundle->num_insns; i++) {
- const struct tile_decoded_instruction *insn =
- &bundle->insns[i];
-
- if (insn->opcode->mnemonic == TILE_OPC_INFO ||
- insn->opcode->mnemonic == TILE_OPC_INFOL) {
- operands[num_ops++] = insn->operand_values[0];
- }
- }
-
- return num_ops;
-}
-
-/* Does this bundle contain a jrp instruction, and if so, to which
- * register is it jumping?
- */
-static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
-{
- const struct tile_decoded_instruction *insn =
- find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0);
- if (insn == NULL)
- return false;
-
- *target_reg = insn->operand_values[0];
- return true;
-}
-
-/* Does this bundle modify the specified register in any way? */
-static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
-{
- int i, j;
- for (i = 0; i < bundle->num_insns; i++) {
- const struct tile_decoded_instruction *insn =
- &bundle->insns[i];
-
- if (insn->opcode->implicitly_written_register == reg)
- return true;
-
- for (j = 0; j < insn->opcode->num_operands; j++)
- if (insn->operands[j]->is_dest_reg &&
- insn->operand_values[j] == reg)
- return true;
- }
-
- return false;
-}
-
-/* Does this bundle modify sp? */
-static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle)
-{
- return bt_modifies_reg(bundle, TREG_SP);
-}
-
-/* Does this bundle modify lr? */
-static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle)
-{
- return bt_modifies_reg(bundle, TREG_LR);
-}
-
-/* Does this bundle contain the instruction 'move fp, sp'? */
-static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
-{
- static const int vals[2] = { 52, TREG_SP };
- return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
-}
-
-/* Does this bundle contain a store of lr to sp? */
-static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
-{
- static const int vals[2] = { TREG_SP, TREG_LR };
- return find_matching_insn(bundle, OPCODE_STORE, vals, 2) != NULL;
-}
-
-#ifdef __tilegx__
-/* Track moveli values placed into registers. */
-static inline void bt_update_moveli(const struct BacktraceBundle *bundle,
- int moveli_args[])
-{
- int i;
- for (i = 0; i < bundle->num_insns; i++) {
- const struct tile_decoded_instruction *insn =
- &bundle->insns[i];
-
- if (insn->opcode->mnemonic == TILEGX_OPC_MOVELI) {
- int reg = insn->operand_values[0];
- moveli_args[reg] = insn->operand_values[1];
- }
- }
-}
-
-/* Does this bundle contain an 'add sp, sp, reg' instruction
- * from a register that we saw a moveli into, and if so, what
- * is the value in the register?
- */
-static bool bt_has_add_sp(const struct BacktraceBundle *bundle, int *adjust,
- int moveli_args[])
-{
- static const int vals[2] = { TREG_SP, TREG_SP };
-
- const struct tile_decoded_instruction *insn =
- find_matching_insn(bundle, TILEGX_OPC_ADDX, vals, 2);
- if (insn) {
- int reg = insn->operand_values[2];
- if (moveli_args[reg]) {
- *adjust = moveli_args[reg];
- return true;
- }
- }
- return false;
-}
-#endif
-
-/* Locates the caller's PC and SP for a program starting at the
- * given address.
- */
-static void find_caller_pc_and_caller_sp(CallerLocation *location,
- const unsigned long start_pc,
- BacktraceMemoryReader read_memory_func,
- void *read_memory_func_extra)
-{
- /* Have we explicitly decided what the sp is,
- * rather than just the default?
- */
- bool sp_determined = false;
-
- /* Has any bundle seen so far modified lr? */
- bool lr_modified = false;
-
- /* Have we seen a move from sp to fp? */
- bool sp_moved_to_r52 = false;
-
- /* Have we seen a terminating bundle? */
- bool seen_terminating_bundle = false;
-
- /* Cut down on round-trip reading overhead by reading several
- * bundles at a time.
- */
- tile_bundle_bits prefetched_bundles[32];
- int num_bundles_prefetched = 0;
- int next_bundle = 0;
- unsigned long pc;
-
-#ifdef __tilegx__
- /* Naively try to track moveli values to support addx for -m32. */
- int moveli_args[TILEGX_NUM_REGISTERS] = { 0 };
-#endif
-
- /* Default to assuming that the caller's sp is the current sp.
- * This is necessary to handle the case where we start backtracing
- * right at the end of the epilog.
- */
- location->sp_location = SP_LOC_OFFSET;
- location->sp_offset = 0;
-
- /* Default to having no idea where the caller PC is. */
- location->pc_location = PC_LOC_UNKNOWN;
-
- /* Don't even try if the PC is not aligned. */
- if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0)
- return;
-
- for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {
-
- struct BacktraceBundle bundle;
- int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
- int one_ago, jrp_reg;
- bool has_jrp;
-
- if (next_bundle >= num_bundles_prefetched) {
- /* Prefetch some bytes, but don't cross a page
- * boundary since that might cause a read failure we
- * don't care about if we only need the first few
- * bytes. Note: we don't care what the actual page
- * size is; using the minimum possible page size will
- * prevent any problems.
- */
- unsigned int bytes_to_prefetch = 4096 - (pc & 4095);
- if (bytes_to_prefetch > sizeof prefetched_bundles)
- bytes_to_prefetch = sizeof prefetched_bundles;
-
- if (!read_memory_func(prefetched_bundles, pc,
- bytes_to_prefetch,
- read_memory_func_extra)) {
- if (pc == start_pc) {
- /* The program probably called a bad
- * address, such as a NULL pointer.
- * So treat this as if we are at the
- * start of the function prolog so the
- * backtrace will show how we got here.
- */
- location->pc_location = PC_LOC_IN_LR;
- return;
- }
-
- /* Unreadable address. Give up. */
- break;
- }
-
- next_bundle = 0;
- num_bundles_prefetched =
- bytes_to_prefetch / sizeof(tile_bundle_bits);
- }
-
- /*
- * Decode the next bundle.
- * TILE always stores instruction bundles in little-endian
- * mode, even when the chip is running in big-endian mode.
- */
- bundle.bits = le64_to_cpu(prefetched_bundles[next_bundle++]);
- bundle.num_insns =
- parse_insn_tile(bundle.bits, pc, bundle.insns);
- num_info_ops = bt_get_info_ops(&bundle, info_operands);
-
- /* First look at any one_ago info ops if they are interesting,
- * since they should shadow any non-one-ago info ops.
- */
- for (one_ago = (pc != start_pc) ? 1 : 0;
- one_ago >= 0; one_ago--) {
- int i;
- for (i = 0; i < num_info_ops; i++) {
- int info_operand = info_operands[i];
- if (info_operand < CALLER_UNKNOWN_BASE) {
- /* Weird; reserved value, ignore it. */
- continue;
- }
-
- /* Skip info ops which are not in the
- * "one_ago" mode we want right now.
- */
- if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0)
- != (one_ago != 0))
- continue;
-
- /* Clear the flag to make later checking
- * easier. */
- info_operand &= ~ONE_BUNDLE_AGO_FLAG;
-
- /* Default to looking at PC_IN_LR_FLAG. */
- if (info_operand & PC_IN_LR_FLAG)
- location->pc_location =
- PC_LOC_IN_LR;
- else
- location->pc_location =
- PC_LOC_ON_STACK;
-
- switch (info_operand) {
- case CALLER_UNKNOWN_BASE:
- location->pc_location = PC_LOC_UNKNOWN;
- location->sp_location = SP_LOC_UNKNOWN;
- return;
-
- case CALLER_SP_IN_R52_BASE:
- case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG:
- location->sp_location = SP_LOC_IN_R52;
- return;
-
- default:
- {
- const unsigned int val = info_operand
- - CALLER_SP_OFFSET_BASE;
- const unsigned int sp_offset =
- (val >> NUM_INFO_OP_FLAGS) * 8;
- if (sp_offset < 32768) {
- /* This is a properly encoded
- * SP offset. */
- location->sp_location =
- SP_LOC_OFFSET;
- location->sp_offset =
- sp_offset;
- return;
- } else {
- /* This looked like an SP
- * offset, but it's outside
- * the legal range, so this
- * must be an unrecognized
- * info operand. Ignore it.
- */
- }
- }
- break;
- }
- }
- }
-
- if (seen_terminating_bundle) {
- /* We saw a terminating bundle during the previous
- * iteration, so we were only looking for an info op.
- */
- break;
- }
-
- if (bundle.bits == 0) {
- /* Wacky terminating bundle. Stop looping, and hope
- * we've already seen enough to find the caller.
- */
- break;
- }
-
- /*
- * Try to determine caller's SP.
- */
-
- if (!sp_determined) {
- int adjust;
- if (bt_has_addi_sp(&bundle, &adjust)
-#ifdef __tilegx__
- || bt_has_add_sp(&bundle, &adjust, moveli_args)
-#endif
- ) {
- location->sp_location = SP_LOC_OFFSET;
-
- if (adjust <= 0) {
- /* We are in prolog about to adjust
- * SP. */
- location->sp_offset = 0;
- } else {
- /* We are in epilog restoring SP. */
- location->sp_offset = adjust;
- }
-
- sp_determined = true;
- } else {
- if (bt_has_move_r52_sp(&bundle)) {
- /* Maybe in prolog, creating an
- * alloca-style frame. But maybe in
- * the middle of a fixed-size frame
- * clobbering r52 with SP.
- */
- sp_moved_to_r52 = true;
- }
-
- if (bt_modifies_sp(&bundle)) {
- if (sp_moved_to_r52) {
- /* We saw SP get saved into
- * r52 earlier (or now), which
- * must have been in the
- * prolog, so we now know that
- * SP is still holding the
- * caller's sp value.
- */
- location->sp_location =
- SP_LOC_OFFSET;
- location->sp_offset = 0;
- } else {
- /* Someone must have saved
- * aside the caller's SP value
- * into r52, so r52 holds the
- * current value.
- */
- location->sp_location =
- SP_LOC_IN_R52;
- }
- sp_determined = true;
- }
- }
-
-#ifdef __tilegx__
- /* Track moveli arguments for -m32 mode. */
- bt_update_moveli(&bundle, moveli_args);
-#endif
- }
-
- if (bt_has_iret(&bundle)) {
- /* This is a terminating bundle. */
- seen_terminating_bundle = true;
- continue;
- }
-
- /*
- * Try to determine caller's PC.
- */
-
- jrp_reg = -1;
- has_jrp = bt_has_jrp(&bundle, &jrp_reg);
- if (has_jrp)
- seen_terminating_bundle = true;
-
- if (location->pc_location == PC_LOC_UNKNOWN) {
- if (has_jrp) {
- if (jrp_reg == TREG_LR && !lr_modified) {
- /* Looks like a leaf function, or else
- * lr is already restored. */
- location->pc_location =
- PC_LOC_IN_LR;
- } else {
- location->pc_location =
- PC_LOC_ON_STACK;
- }
- } else if (bt_has_sw_sp_lr(&bundle)) {
- /* In prolog, spilling initial lr to stack. */
- location->pc_location = PC_LOC_IN_LR;
- } else if (bt_modifies_lr(&bundle)) {
- lr_modified = true;
- }
- }
- }
-}
-
-/* Initializes a backtracer to start from the given location.
- *
- * If the frame pointer cannot be determined it is set to -1.
- *
- * state: The state to be filled in.
- * read_memory_func: A callback that reads memory.
- * read_memory_func_extra: An arbitrary argument to read_memory_func.
- * pc: The current PC.
- * lr: The current value of the 'lr' register.
- * sp: The current value of the 'sp' register.
- * r52: The current value of the 'r52' register.
- */
-void backtrace_init(BacktraceIterator *state,
- BacktraceMemoryReader read_memory_func,
- void *read_memory_func_extra,
- unsigned long pc, unsigned long lr,
- unsigned long sp, unsigned long r52)
-{
- CallerLocation location;
- unsigned long fp, initial_frame_caller_pc;
-
- /* Find out where we are in the initial frame. */
- find_caller_pc_and_caller_sp(&location, pc,
- read_memory_func, read_memory_func_extra);
-
- switch (location.sp_location) {
- case SP_LOC_UNKNOWN:
- /* Give up. */
- fp = -1;
- break;
-
- case SP_LOC_IN_R52:
- fp = r52;
- break;
-
- case SP_LOC_OFFSET:
- fp = sp + location.sp_offset;
- break;
-
- default:
- /* Give up. */
- fp = -1;
- break;
- }
-
- /* If the frame pointer is not aligned to the basic word size
- * something terrible happened and we should mark it as invalid.
- */
- if (fp % sizeof(bt_int_reg_t) != 0)
- fp = -1;
-
- /* -1 means "don't know initial_frame_caller_pc". */
- initial_frame_caller_pc = -1;
-
- switch (location.pc_location) {
- case PC_LOC_UNKNOWN:
- /* Give up. */
- fp = -1;
- break;
-
- case PC_LOC_IN_LR:
- if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
- /* Give up. */
- fp = -1;
- } else {
- initial_frame_caller_pc = lr;
- }
- break;
-
- case PC_LOC_ON_STACK:
- /* Leave initial_frame_caller_pc as -1,
- * meaning check the stack.
- */
- break;
-
- default:
- /* Give up. */
- fp = -1;
- break;
- }
-
- state->pc = pc;
- state->sp = sp;
- state->fp = fp;
- state->initial_frame_caller_pc = initial_frame_caller_pc;
- state->read_memory_func = read_memory_func;
- state->read_memory_func_extra = read_memory_func_extra;
-}
-
-/* Handle the case where the register holds more bits than the VA. */
-static bool valid_addr_reg(bt_int_reg_t reg)
-{
- return ((unsigned long)reg == reg);
-}
-
-/* Advances the backtracing state to the calling frame, returning
- * true iff successful.
- */
-bool backtrace_next(BacktraceIterator *state)
-{
- unsigned long next_fp, next_pc;
- bt_int_reg_t next_frame[2];
-
- if (state->fp == -1) {
- /* No parent frame. */
- return false;
- }
-
- /* Try to read the frame linkage data chaining to the next function. */
- if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame,
- state->read_memory_func_extra)) {
- return false;
- }
-
- next_fp = next_frame[1];
- if (!valid_addr_reg(next_frame[1]) ||
- next_fp % sizeof(bt_int_reg_t) != 0) {
- /* Caller's frame pointer is suspect, so give up. */
- return false;
- }
-
- if (state->initial_frame_caller_pc != -1) {
- /* We must be in the initial stack frame and already know the
- * caller PC.
- */
- next_pc = state->initial_frame_caller_pc;
-
- /* Force reading stack next time, in case we were in the
- * initial frame. We don't do this above just to paranoidly
- * avoid changing the struct at all when we return false.
- */
- state->initial_frame_caller_pc = -1;
- } else {
- /* Get the caller PC from the frame linkage area. */
- next_pc = next_frame[0];
- if (!valid_addr_reg(next_frame[0]) || next_pc == 0 ||
- next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
- /* The PC is suspect, so give up. */
- return false;
- }
- }
-
- /* Update state to become the caller's stack frame. */
- state->pc = next_pc;
- state->sp = state->fp;
- state->fp = next_fp;
-
- return true;
-}
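
backtrace_init() and backtrace_next() are the whole public surface of this file. A hypothetical caller, sketched below, shows how they are meant to be driven; the in-kernel user is the KBacktraceIterator code in arch/tile/kernel/stack.c, and demo_read_mem() here is an assumed stand-in for a BacktraceMemoryReader with no fault handling:

    #include <linux/printk.h>
    #include <linux/string.h>
    #include <asm/backtrace.h>

    /* Assumed reader: blindly copies kernel memory. A real reader must
     * validate the address range first (see stack.c). */
    static bool demo_read_mem(void *dest, unsigned long addr,
                              unsigned int size, void *extra)
    {
            memcpy(dest, (void *)addr, size);
            return true;
    }

    static void demo_dump_frames(unsigned long pc, unsigned long lr,
                                 unsigned long sp, unsigned long r52)
    {
            BacktraceIterator it;

            backtrace_init(&it, demo_read_mem, NULL, pc, lr, sp, r52);
            do {
                    pr_info("  frame: pc %#lx sp %#lx fp %#lx\n",
                            it.pc, it.sp, it.fp);
            } while (backtrace_next(&it));
    }

Note that the loop prints the initial frame before the first backtrace_next() call, matching the init-then-advance contract above.
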
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
deleted file mode 100644
index bdaf71d31a4a..000000000000
--- a/arch/tile/kernel/compat.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/* Adjust unistd.h to provide 32-bit numbers and functions. */
-#define __SYSCALL_COMPAT
-
-#include <linux/compat.h>
-#include <linux/syscalls.h>
-#include <linux/kdev_t.h>
-#include <linux/fs.h>
-#include <linux/fcntl.h>
-#include <linux/uaccess.h>
-#include <linux/signal.h>
-#include <asm/syscalls.h>
-#include <asm/byteorder.h>
-
-/*
- * Syscalls that take 64-bit numbers traditionally take them in 32-bit
- * "high" and "low" value parts on 32-bit architectures.
- * In principle, one could imagine passing some register arguments as
- * fully 64-bit on TILE-Gx in 32-bit mode, but it seems easier to
- * adopt the usual convention.
- */
-
-#ifdef __BIG_ENDIAN
-#define SYSCALL_PAIR(name) u32, name ## _hi, u32, name ## _lo
-#else
-#define SYSCALL_PAIR(name) u32, name ## _lo, u32, name ## _hi
-#endif
-
-COMPAT_SYSCALL_DEFINE4(truncate64, char __user *, filename, u32, dummy,
- SYSCALL_PAIR(length))
-{
- return sys_truncate(filename, ((loff_t)length_hi << 32) | length_lo);
-}
-
-COMPAT_SYSCALL_DEFINE4(ftruncate64, unsigned int, fd, u32, dummy,
- SYSCALL_PAIR(length))
-{
- return sys_ftruncate(fd, ((loff_t)length_hi << 32) | length_lo);
-}
-
-COMPAT_SYSCALL_DEFINE6(pread64, unsigned int, fd, char __user *, ubuf,
- size_t, count, u32, dummy, SYSCALL_PAIR(offset))
-{
- return sys_pread64(fd, ubuf, count,
- ((loff_t)offset_hi << 32) | offset_lo);
-}
-
-COMPAT_SYSCALL_DEFINE6(pwrite64, unsigned int, fd, char __user *, ubuf,
- size_t, count, u32, dummy, SYSCALL_PAIR(offset))
-{
- return sys_pwrite64(fd, ubuf, count,
- ((loff_t)offset_hi << 32) | offset_lo);
-}
-
-COMPAT_SYSCALL_DEFINE6(sync_file_range2, int, fd, unsigned int, flags,
- SYSCALL_PAIR(offset), SYSCALL_PAIR(nbytes))
-{
- return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo,
- ((loff_t)nbytes_hi << 32) | nbytes_lo,
- flags);
-}
-
-COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode,
- SYSCALL_PAIR(offset), SYSCALL_PAIR(len))
-{
- return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo,
- ((loff_t)len_hi << 32) | len_lo);
-}
-
-/*
- * Avoid bug in generic sys_llseek() that specifies offset_high and
- * offset_low as "unsigned long", thus making it possible to pass
- * a sign-extended high 32 bits in offset_low.
- * Note that we do not use SYSCALL_PAIR here since glibc passes the
- * high and low parts explicitly in that order.
- */
-COMPAT_SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned int, offset_high,
- unsigned int, offset_low, loff_t __user *, result,
- unsigned int, origin)
-{
- return sys_llseek(fd, offset_high, offset_low, result, origin);
-}
-
-/* Provide the compat syscall number to call mapping. */
-#undef __SYSCALL
-#define __SYSCALL(nr, call) [nr] = (call),
-
-/* See comments in sys.c */
-#define compat_sys_fadvise64_64 sys32_fadvise64_64
-#define compat_sys_readahead sys32_readahead
-#define sys_llseek compat_sys_llseek
-
-/* Call the assembly trampolines where necessary. */
-#define compat_sys_rt_sigreturn _compat_sys_rt_sigreturn
-#define sys_clone _sys_clone
-
-/*
- * Note that we can't include <linux/unistd.h> here since the header
- * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
- */
-void *compat_sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls-1] = sys_ni_syscall,
-#include <asm/unistd.h>
-};
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
deleted file mode 100644
index a703bd0e0488..000000000000
--- a/arch/tile/kernel/compat_signal.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/personality.h>
-#include <linux/suspend.h>
-#include <linux/ptrace.h>
-#include <linux/elf.h>
-#include <linux/compat.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <asm/processor.h>
-#include <asm/ucontext.h>
-#include <asm/sigframe.h>
-#include <asm/syscalls.h>
-#include <asm/vdso.h>
-#include <arch/interrupts.h>
-
-struct compat_ucontext {
- compat_ulong_t uc_flags;
- compat_uptr_t uc_link;
- struct compat_sigaltstack uc_stack;
- struct sigcontext uc_mcontext;
- sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-struct compat_rt_sigframe {
- unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */
- struct compat_siginfo info;
- struct compat_ucontext uc;
-};
-
-/* The assembly shim for this function arranges to ignore the return value. */
-long compat_sys_rt_sigreturn(void)
-{
- struct pt_regs *regs = current_pt_regs();
- struct compat_rt_sigframe __user *frame =
- (struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
- sigset_t set;
-
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
- goto badframe;
-
- if (compat_restore_altstack(&frame->uc.uc_stack))
- goto badframe;
-
- return 0;
-
-badframe:
- signal_fault("bad sigreturn frame", regs, frame, 0);
- return 0;
-}
-
-/*
- * Determine which stack to use.
- */
-static inline void __user *compat_get_sigframe(struct k_sigaction *ka,
- struct pt_regs *regs,
- size_t frame_size)
-{
- unsigned long sp;
-
- /* Default to using normal stack */
- sp = (unsigned long)compat_ptr(regs->sp);
-
- /*
- * If we are on the alternate signal stack and would overflow
- * it, don't. Return an always-bogus address instead so we
- * will die with SIGSEGV.
- */
- if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
- return (void __user __force *)-1UL;
-
- /* This is the X/Open sanctioned signal stack switching. */
- if (ka->sa.sa_flags & SA_ONSTACK) {
- if (sas_ss_flags(sp) == 0)
- sp = current->sas_ss_sp + current->sas_ss_size;
- }
-
- sp -= frame_size;
- /*
- * Align the stack pointer according to the TILE ABI,
- * i.e. so that on function entry (sp & 15) == 0.
- */
- sp &= -16UL;
- return (void __user *) sp;
-}
-
-int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs)
-{
- unsigned long restorer;
- struct compat_rt_sigframe __user *frame;
- int err = 0, sig = ksig->sig;
-
- frame = compat_get_sigframe(&ksig->ka, regs, sizeof(*frame));
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto err;
-
- /* Always write at least the signal number for the stack backtracer. */
- if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
- /* At sigreturn time, restore the callee-save registers too. */
- err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
- regs->flags |= PT_FLAGS_RESTORE_REGS;
- } else {
- err |= __put_user(ksig->info.si_signo, &frame->info.si_signo);
- }
-
- /* Create the ucontext. */
- err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- if (err)
- goto err;
-
- restorer = VDSO_SYM(&__vdso_rt_sigreturn);
- if (ksig->ka.sa.sa_flags & SA_RESTORER)
- restorer = ptr_to_compat_reg(ksig->ka.sa.sa_restorer);
-
- /*
- * Set up registers for signal handler.
- * Registers that we don't modify keep the value they had from
- * user-space at the time we took the signal.
- * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
- * since some things rely on this (e.g. glibc's debug/segfault.c).
- */
- regs->pc = ptr_to_compat_reg(ksig->ka.sa.sa_handler);
- regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
- regs->sp = ptr_to_compat_reg(frame);
- regs->lr = restorer;
- regs->regs[0] = (unsigned long) sig;
- regs->regs[1] = ptr_to_compat_reg(&frame->info);
- regs->regs[2] = ptr_to_compat_reg(&frame->uc);
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- return 0;
-
-err:
- trace_unhandled_signal("bad sigreturn frame", regs,
- (unsigned long)frame, SIGSEGV);
- return -EFAULT;
-}
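
The frame placement in compat_get_sigframe() ends with sp &= -16UL, enforcing the TILE ABI rule that (sp & 15) == 0 on function entry. That step in isolation:

    /* Round a stack pointer down to a 16-byte boundary: -16UL is ~15UL,
     * so the AND simply clears the low four bits. */
    static unsigned long demo_align_sp(unsigned long sp)
    {
            return sp & -16UL;      /* e.g. 0x1009 -> 0x1000 */
    }
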
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
deleted file mode 100644
index aefb2c086726..000000000000
--- a/arch/tile/kernel/early_printk.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/console.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/irqflags.h>
-#include <linux/printk.h>
-#include <asm/setup.h>
-#include <hv/hypervisor.h>
-
-static void early_hv_write(struct console *con, const char *s, unsigned n)
-{
- tile_console_write(s, n);
-
- /*
- * Convert NL to NLCR (close enough to CRNL) during early boot.
- * We assume newlines are at the ends of strings, which turns out
- * to be good enough for early boot console output.
- */
- if (n && s[n-1] == '\n')
- tile_console_write("\r", 1);
-}
-
-static struct console early_hv_console = {
- .name = "earlyhv",
- .write = early_hv_write,
- .flags = CON_PRINTBUFFER | CON_BOOT,
- .index = -1,
-};
-
-void early_panic(const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- arch_local_irq_disable_all();
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- early_printk("Kernel panic - not syncing: %pV", &vaf);
-
- va_end(args);
-
- dump_stack();
- hv_halt();
-}
-
-static int __init setup_early_printk(char *str)
-{
- if (early_console)
- return 1;
-
- early_console = &early_hv_console;
- register_console(early_console);
-
- return 0;
-}
-
-early_param("earlyprintk", setup_early_printk);
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
deleted file mode 100644
index 101de132e363..000000000000
--- a/arch/tile/kernel/entry.S
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/linkage.h>
-#include <linux/unistd.h>
-#include <asm/irqflags.h>
-#include <asm/processor.h>
-#include <arch/abi.h>
-#include <arch/spr_def.h>
-
-#ifdef __tilegx__
-#define bnzt bnezt
-#endif
-
-STD_ENTRY(current_text_addr)
- { move r0, lr; jrp lr }
- STD_ENDPROC(current_text_addr)
-
-STD_ENTRY(KBacktraceIterator_init_current)
- { move r2, lr; lnk r1 }
- { move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
- { move r3, sp; j _KBacktraceIterator_init_current }
- jrp lr /* keep backtracer happy */
- STD_ENDPROC(KBacktraceIterator_init_current)
-
-/* Loop forever on a nap during SMP boot. */
-STD_ENTRY(smp_nap)
- nap
- nop /* avoid provoking the icache prefetch with a jump */
- j smp_nap /* we are not architecturally guaranteed not to exit nap */
- jrp lr /* clue in the backtracer */
- STD_ENDPROC(smp_nap)
-
-/*
- * Enable interrupts racelessly and then nap until interrupted.
- * Architecturally, we are guaranteed that enabling interrupts via
- * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
- * This function's _cpu_idle_nap address is special; see intvec.S.
- * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
- * as a result return to the function that called _cpu_idle().
- */
-STD_ENTRY_SECTION(_cpu_idle, .cpuidle.text)
- movei r1, 1
- IRQ_ENABLE_LOAD(r2, r3)
- mtspr INTERRUPT_CRITICAL_SECTION, r1
- IRQ_ENABLE_APPLY(r2, r3) /* unmask, but still with ICS set */
- mtspr INTERRUPT_CRITICAL_SECTION, zero
- .global _cpu_idle_nap
-_cpu_idle_nap:
- nap
- nop /* avoid provoking the icache prefetch with a jump */
- jrp lr
- STD_ENDPROC(_cpu_idle)
diff --git a/arch/tile/kernel/ftrace.c b/arch/tile/kernel/ftrace.c
deleted file mode 100644
index b827a418b155..000000000000
--- a/arch/tile/kernel/ftrace.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE-Gx specific ftrace support
- */
-
-#include <linux/ftrace.h>
-#include <linux/uaccess.h>
-
-#include <asm/cacheflush.h>
-#include <asm/ftrace.h>
-#include <asm/sections.h>
-#include <asm/insn.h>
-
-#include <arch/opcode.h>
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-static int machine_stopped __read_mostly;
-
-int ftrace_arch_code_modify_prepare(void)
-{
- machine_stopped = 1;
- return 0;
-}
-
-int ftrace_arch_code_modify_post_process(void)
-{
- flush_icache_range(0, CHIP_L1I_CACHE_SIZE());
- machine_stopped = 0;
- return 0;
-}
-
-/*
- * Put { move r10, lr; jal ftrace_caller } in a bundle; this lets the
- * dynamic tracer add just one cycle of overhead to every kernel function
- * when disabled.
- */
-static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
- bool link)
-{
- tilegx_bundle_bits opcode_x0, opcode_x1;
- long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;
-
- if (link) {
- /* opcode: jal addr */
- opcode_x1 =
- create_Opcode_X1(JUMP_OPCODE_X1) |
- create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
- create_JumpOff_X1(pcrel_by_instr);
- } else {
- /* opcode: j addr */
- opcode_x1 =
- create_Opcode_X1(JUMP_OPCODE_X1) |
- create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
- create_JumpOff_X1(pcrel_by_instr);
- }
-
- /*
- * Also put { move r10, lr; jal ftrace_stub } in a bundle, which
- * is used to replace the instruction at address ftrace_call.
- */
- if (addr == FTRACE_ADDR || addr == (unsigned long)ftrace_stub) {
- /* opcode: or r10, lr, zero */
- opcode_x0 =
- create_Dest_X0(10) |
- create_SrcA_X0(TREG_LR) |
- create_SrcB_X0(TREG_ZERO) |
- create_RRROpcodeExtension_X0(OR_RRR_0_OPCODE_X0) |
- create_Opcode_X0(RRR_0_OPCODE_X0);
- } else {
- /* opcode: fnop */
- opcode_x0 =
- create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
- create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
- create_Opcode_X0(RRR_0_OPCODE_X0);
- }
-
- return opcode_x1 | opcode_x0;
-}
-
-static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
-{
- return NOP();
-}
-
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
-{
- return ftrace_gen_branch(pc, addr, true);
-}
-
-static int ftrace_modify_code(unsigned long pc, unsigned long old,
- unsigned long new)
-{
- unsigned long pc_wr;
-
- /* Check if the address is in kernel text space or module space. */
- if (!kernel_text_address(pc))
- return -EINVAL;
-
- /* Operate on writable kernel text mapping. */
- pc_wr = ktext_writable_addr(pc);
-
- if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE))
- return -EPERM;
-
- smp_wmb();
-
- if (!machine_stopped && num_online_cpus() > 1)
- flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
-
- return 0;
-}
-
-int ftrace_update_ftrace_func(ftrace_func_t func)
-{
- unsigned long pc, old;
- unsigned long new;
- int ret;
-
- pc = (unsigned long)&ftrace_call;
- memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
- new = ftrace_call_replace(pc, (unsigned long)func);
-
- ret = ftrace_modify_code(pc, old, new);
-
- return ret;
-}
-
-int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned long new, old;
- unsigned long ip = rec->ip;
-
- old = ftrace_nop_replace(rec);
- new = ftrace_call_replace(ip, addr);
-
- return ftrace_modify_code(rec->ip, old, new);
-}
-
-int ftrace_make_nop(struct module *mod,
- struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned long ip = rec->ip;
- unsigned long old;
- unsigned long new;
- int ret;
-
- old = ftrace_call_replace(ip, addr);
- new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new);
-
- return ret;
-}
-
-int __init ftrace_dyn_arch_init(void)
-{
- return 0;
-}
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
- unsigned long frame_pointer)
-{
- unsigned long return_hooker = (unsigned long) &return_to_handler;
- struct ftrace_graph_ent trace;
- unsigned long old;
- int err;
-
- if (unlikely(atomic_read(&current->tracing_graph_pause)))
- return;
-
- old = *parent;
- *parent = return_hooker;
-
- err = ftrace_push_return_trace(old, self_addr, &trace.depth,
- frame_pointer, NULL);
- if (err == -EBUSY) {
- *parent = old;
- return;
- }
-
- trace.func = self_addr;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
- *parent = old;
- }
-}
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-extern unsigned long ftrace_graph_call;
-
-static int __ftrace_modify_caller(unsigned long *callsite,
- void (*func) (void), bool enable)
-{
- unsigned long caller_fn = (unsigned long) func;
- unsigned long pc = (unsigned long) callsite;
- unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
- unsigned long nop = NOP();
- unsigned long old = enable ? nop : branch;
- unsigned long new = enable ? branch : nop;
-
- return ftrace_modify_code(pc, old, new);
-}
-
-static int ftrace_modify_graph_caller(bool enable)
-{
- int ret;
-
- ret = __ftrace_modify_caller(&ftrace_graph_call,
- ftrace_graph_caller,
- enable);
-
- return ret;
-}
-
-int ftrace_enable_ftrace_graph_caller(void)
-{
- return ftrace_modify_graph_caller(true);
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
- return ftrace_modify_graph_caller(false);
-}
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
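
The offset handed to create_JumpOff_X1() in ftrace_gen_branch() is measured in bundles, not bytes, since TILE-Gx jump targets are always bundle-aligned. A standalone check of that arithmetic, assuming the usual 8-byte bundle size (the addresses below are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES 3      /* 8-byte bundles */

    int main(void)
    {
            uint64_t pc   = 0xfffffff700080000ULL;  /* patched call site */
            uint64_t addr = 0xfffffff700080100ULL;  /* e.g. ftrace_caller */

            int64_t pcrel_by_instr =
                    (int64_t)(addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;

            printf("jal offset: %lld bundles\n",
                   (long long)pcrel_by_instr);      /* prints 32 */
            return 0;
    }
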
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
deleted file mode 100644
index 2fd1694ac1d0..000000000000
--- a/arch/tile/kernel/hardwall.c
+++ /dev/null
@@ -1,1096 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/rwsem.h>
-#include <linux/kprobes.h>
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-#include <linux/uaccess.h>
-#include <linux/smp.h>
-#include <linux/cdev.h>
-#include <linux/compat.h>
-#include <asm/hardwall.h>
-#include <asm/traps.h>
-#include <asm/siginfo.h>
-#include <asm/irq_regs.h>
-
-#include <arch/interrupts.h>
-#include <arch/spr_def.h>
-
-
-/*
- * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
- * We use "hardwall" nomenclature throughout for historical reasons.
- * The lock here controls access to the list data structure as well as
- * to the items on the list.
- */
-struct hardwall_type {
- int index;
- int is_xdn;
- int is_idn;
- int disabled;
- const char *name;
- struct list_head list;
- spinlock_t lock;
- struct proc_dir_entry *proc_dir;
-};
-
-enum hardwall_index {
- HARDWALL_UDN = 0,
-#ifndef __tilepro__
- HARDWALL_IDN = 1,
- HARDWALL_IPI = 2,
-#endif
- _HARDWALL_TYPES
-};
-
-static struct hardwall_type hardwall_types[] = {
- { /* user-space access to UDN */
- 0,
- 1,
- 0,
- 0,
- "udn",
- LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
- __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
- NULL
- },
-#ifndef __tilepro__
- { /* user-space access to IDN */
- 1,
- 1,
- 1,
- 1, /* disabled pending hypervisor support */
- "idn",
- LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
- __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
- NULL
- },
- { /* access to user-space IPI */
- 2,
- 0,
- 0,
- 0,
- "ipi",
- LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
- __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
- NULL
- },
-#endif
-};
-
-/*
- * This data structure tracks the cpu data, etc., associated
- * one-to-one with a "struct file *" from opening a hardwall device file.
- * Note that the file's private data points back to this structure.
- */
-struct hardwall_info {
- struct list_head list; /* for hardwall_types.list */
- struct list_head task_head; /* head of tasks in this hardwall */
- struct hardwall_type *type; /* type of this resource */
- struct cpumask cpumask; /* cpus reserved */
- int id; /* integer id for this hardwall */
- int teardown_in_progress; /* are we tearing this one down? */
-
- /* Remaining fields only valid for user-network resources. */
- int ulhc_x; /* upper left hand corner x coord */
- int ulhc_y; /* upper left hand corner y coord */
- int width; /* rectangle width */
- int height; /* rectangle height */
-#if CHIP_HAS_REV1_XDN()
- atomic_t xdn_pending_count; /* cores in phase 1 of drain */
-#endif
-};
-
-
-/* /proc/tile/hardwall */
-static struct proc_dir_entry *hardwall_proc_dir;
-
-/* Functions to manage files in /proc/tile/hardwall. */
-static void hardwall_add_proc(struct hardwall_info *);
-static void hardwall_remove_proc(struct hardwall_info *);
-
-/* Allow disabling UDN access. */
-static int __init noudn(char *str)
-{
- pr_info("User-space UDN access is disabled\n");
- hardwall_types[HARDWALL_UDN].disabled = 1;
- return 0;
-}
-early_param("noudn", noudn);
-
-#ifndef __tilepro__
-/* Allow disabling IDN access. */
-static int __init noidn(char *str)
-{
- pr_info("User-space IDN access is disabled\n");
- hardwall_types[HARDWALL_IDN].disabled = 1;
- return 0;
-}
-early_param("noidn", noidn);
-
-/* Allow disabling IPI access. */
-static int __init noipi(char *str)
-{
- pr_info("User-space IPI access is disabled\n");
- hardwall_types[HARDWALL_IPI].disabled = 1;
- return 0;
-}
-early_param("noipi", noipi);
-#endif
-
-
-/*
- * Low-level primitives for UDN/IDN
- */
-
-#ifdef __tilepro__
-#define mtspr_XDN(hwt, name, val) \
- do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
-#define mtspr_MPL_XDN(hwt, name, val) \
- do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
-#define mfspr_XDN(hwt, name) \
- ((void)(hwt), __insn_mfspr(SPR_UDN_##name))
-#else
-#define mtspr_XDN(hwt, name, val) \
- do { \
- if ((hwt)->is_idn) \
- __insn_mtspr(SPR_IDN_##name, (val)); \
- else \
- __insn_mtspr(SPR_UDN_##name, (val)); \
- } while (0)
-#define mtspr_MPL_XDN(hwt, name, val) \
- do { \
- if ((hwt)->is_idn) \
- __insn_mtspr(SPR_MPL_IDN_##name, (val)); \
- else \
- __insn_mtspr(SPR_MPL_UDN_##name, (val)); \
- } while (0)
-#define mfspr_XDN(hwt, name) \
- ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
-#endif
-
-/* Set a CPU bit if the CPU is online. */
-#define cpu_online_set(cpu, dst) do { \
- if (cpu_online(cpu)) \
- cpumask_set_cpu(cpu, dst); \
-} while (0)
-
-
-/* Does the given rectangle contain the given x,y coordinate? */
-static int contains(struct hardwall_info *r, int x, int y)
-{
- return (x >= r->ulhc_x && x < r->ulhc_x + r->width) &&
- (y >= r->ulhc_y && y < r->ulhc_y + r->height);
-}
-
-/* Compute the rectangle parameters and validate the cpumask. */
-static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
-{
- int x, y, cpu, ulhc, lrhc;
-
- /* The first cpu is the ULHC, the last the LRHC. */
- ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
- lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);
-
- /* Compute the rectangle attributes from the cpus. */
- r->ulhc_x = cpu_x(ulhc);
- r->ulhc_y = cpu_y(ulhc);
- r->width = cpu_x(lrhc) - r->ulhc_x + 1;
- r->height = cpu_y(lrhc) - r->ulhc_y + 1;
-
- /* Width and height must be positive */
- if (r->width <= 0 || r->height <= 0)
- return -EINVAL;
-
- /* Confirm that the cpumask is exactly the rectangle. */
- for (y = 0, cpu = 0; y < smp_height; ++y)
- for (x = 0; x < smp_width; ++x, ++cpu)
- if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
- return -EINVAL;
-
- /*
- * Note that offline cpus can't be drained when this user network
- * rectangle eventually closes. We used to detect this
- * situation and print a warning, but it annoyed users and
- * they ignored it anyway, so now we just return without a
- * warning.
- */
- return 0;
-}
-
-/*
- * Hardware management of hardwall setup, teardown, trapping,
- * and enabling/disabling PL0 access to the networks.
- */
-
-/* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */
-enum direction_protect {
- N_PROTECT = (1 << 0),
- E_PROTECT = (1 << 1),
- S_PROTECT = (1 << 2),
- W_PROTECT = (1 << 3),
- C_PROTECT = (1 << 4),
-};
-
-static inline int xdn_which_interrupt(struct hardwall_type *hwt)
-{
-#ifndef __tilepro__
- if (hwt->is_idn)
- return INT_IDN_FIREWALL;
-#endif
- return INT_UDN_FIREWALL;
-}
-
-static void enable_firewall_interrupts(struct hardwall_type *hwt)
-{
- arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
-}
-
-static void disable_firewall_interrupts(struct hardwall_type *hwt)
-{
- arch_local_irq_mask_now(xdn_which_interrupt(hwt));
-}
-
-/* Set up hardwall on this cpu based on the passed hardwall_info. */
-static void hardwall_setup_func(void *info)
-{
- struct hardwall_info *r = info;
- struct hardwall_type *hwt = r->type;
-
- int cpu = smp_processor_id(); /* on_each_cpu disables preemption */
- int x = cpu_x(cpu);
- int y = cpu_y(cpu);
- int bits = 0;
- if (x == r->ulhc_x)
- bits |= W_PROTECT;
- if (x == r->ulhc_x + r->width - 1)
- bits |= E_PROTECT;
- if (y == r->ulhc_y)
- bits |= N_PROTECT;
- if (y == r->ulhc_y + r->height - 1)
- bits |= S_PROTECT;
- BUG_ON(bits == 0);
- mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
- enable_firewall_interrupts(hwt);
-}
-
-/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
-static void hardwall_protect_rectangle(struct hardwall_info *r)
-{
- int x, y, cpu, delta;
- struct cpumask rect_cpus;
-
- cpumask_clear(&rect_cpus);
-
- /* First include the top and bottom edges */
- cpu = r->ulhc_y * smp_width + r->ulhc_x;
- delta = (r->height - 1) * smp_width;
- for (x = 0; x < r->width; ++x, ++cpu) {
- cpu_online_set(cpu, &rect_cpus);
- cpu_online_set(cpu + delta, &rect_cpus);
- }
-
- /* Then the left and right edges */
- cpu -= r->width;
- delta = r->width - 1;
- for (y = 0; y < r->height; ++y, cpu += smp_width) {
- cpu_online_set(cpu, &rect_cpus);
- cpu_online_set(cpu + delta, &rect_cpus);
- }
-
- /* Then tell all the cpus to set up their protection SPR */
- on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
-}
-
-/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
-void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
-{
- struct hardwall_info *rect;
- struct hardwall_type *hwt;
- struct task_struct *p;
- struct siginfo info;
- int cpu = smp_processor_id();
- int found_processes;
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- irq_enter();
-
- /* Figure out which network trapped. */
- switch (fault_num) {
-#ifndef __tilepro__
- case INT_IDN_FIREWALL:
- hwt = &hardwall_types[HARDWALL_IDN];
- break;
-#endif
- case INT_UDN_FIREWALL:
- hwt = &hardwall_types[HARDWALL_UDN];
- break;
- default:
- BUG();
- }
- BUG_ON(hwt->disabled);
-
- /* This tile trapped a network access; find the rectangle. */
- spin_lock(&hwt->lock);
- list_for_each_entry(rect, &hwt->list, list) {
- if (cpumask_test_cpu(cpu, &rect->cpumask))
- break;
- }
-
- /*
- * It shouldn't be possible not to find this cpu on the
- * rectangle list, since only cpus in rectangles get hardwalled.
- * The hardwall is only removed after the user network is drained.
- */
- BUG_ON(&rect->list == &hwt->list);
-
- /*
- * If we already started teardown on this hardwall, don't worry;
- * the abort signal has been sent and we are just waiting for things
- * to quiesce.
- */
- if (rect->teardown_in_progress) {
- pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n",
- cpu, hwt->name,
- (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
- goto done;
- }
-
- /*
- * Kill off any process that is activated in this rectangle.
- * We bypass security to deliver the signal, since it must be
- * one of the activated processes that generated the user network
- * message that caused this trap, and all the activated
- * processes shared a single open file so are pretty tightly
- * bound together from a security point of view to begin with.
- */
- rect->teardown_in_progress = 1;
- wmb(); /* Ensure visibility of rectangle before notifying processes. */
- pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
- cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_HARDWALL;
- found_processes = 0;
- list_for_each_entry(p, &rect->task_head,
- thread.hardwall[hwt->index].list) {
- BUG_ON(p->thread.hardwall[hwt->index].info != rect);
- if (!(p->flags & PF_EXITING)) {
- found_processes = 1;
- pr_notice("hardwall: killing %d\n", p->pid);
- do_send_sig_info(info.si_signo, &info, p, false);
- }
- }
- if (!found_processes)
- pr_notice("hardwall: no associated processes!\n");
-
- done:
- spin_unlock(&hwt->lock);
-
- /*
- * We have to disable firewall interrupts now, or else when we
- * return from this handler, we will simply re-interrupt back to
- * it. However, we can't clear the protection bits, since we
- * haven't yet drained the network, and that would allow packets
- * to cross out of the hardwall region.
- */
- disable_firewall_interrupts(hwt);
-
- irq_exit();
- set_irq_regs(old_regs);
-}
-
-/* Allow access from user space to the user network. */
-void grant_hardwall_mpls(struct hardwall_type *hwt)
-{
-#ifndef __tilepro__
- if (!hwt->is_xdn) {
- __insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
- return;
- }
-#endif
- mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
- mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
- mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
- mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
-#if !CHIP_HAS_REV1_XDN()
- mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
- mtspr_MPL_XDN(hwt, CA_SET_0, 1);
-#endif
-}
-
-/* Deny access from user space to the user network. */
-void restrict_hardwall_mpls(struct hardwall_type *hwt)
-{
-#ifndef __tilepro__
- if (!hwt->is_xdn) {
- __insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
- return;
- }
-#endif
- mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
- mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
- mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
- mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
-#if !CHIP_HAS_REV1_XDN()
- mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
- mtspr_MPL_XDN(hwt, CA_SET_1, 1);
-#endif
-}
-
-/* Restrict or grant access as necessary for the task we're switching to. */
-void hardwall_switch_tasks(struct task_struct *prev,
- struct task_struct *next)
-{
- int i;
- for (i = 0; i < HARDWALL_TYPES; ++i) {
- if (prev->thread.hardwall[i].info != NULL) {
- if (next->thread.hardwall[i].info == NULL)
- restrict_hardwall_mpls(&hardwall_types[i]);
- } else if (next->thread.hardwall[i].info != NULL) {
- grant_hardwall_mpls(&hardwall_types[i]);
- }
- }
-}
-
-/* Does this task have the right to IPI the given cpu? */
-int hardwall_ipi_valid(int cpu)
-{
-#ifdef __tilegx__
- struct hardwall_info *info =
- current->thread.hardwall[HARDWALL_IPI].info;
- return info && cpumask_test_cpu(cpu, &info->cpumask);
-#else
- return 0;
-#endif
-}
-
-/*
- * Code to create, activate, deactivate, and destroy hardwall resources.
- */
-
-/* Create a hardwall for the given resource */
-static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
- size_t size,
- const unsigned char __user *bits)
-{
- struct hardwall_info *iter, *info;
- struct cpumask mask;
- unsigned long flags;
- int rc;
-
- /* Reject crazy sizes out of hand, a la sys_mbind(). */
- if (size > PAGE_SIZE)
- return ERR_PTR(-EINVAL);
-
- /* Copy whatever fits into a cpumask. */
- if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size)))
- return ERR_PTR(-EFAULT);
-
- /*
- * If the size was short, clear the rest of the mask;
- * otherwise validate that the rest of the user mask was zero
- * (we don't try hard to be efficient when validating huge masks).
- */
- if (size < sizeof(struct cpumask)) {
- memset((char *)&mask + size, 0, sizeof(struct cpumask) - size);
- } else if (size > sizeof(struct cpumask)) {
- size_t i;
- for (i = sizeof(struct cpumask); i < size; ++i) {
- char c;
- if (get_user(c, &bits[i]))
- return ERR_PTR(-EFAULT);
- if (c)
- return ERR_PTR(-EINVAL);
- }
- }
-
- /* Allocate a new hardwall_info optimistically. */
- info = kmalloc(sizeof(struct hardwall_info),
- GFP_KERNEL | __GFP_ZERO);
- if (info == NULL)
- return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&info->task_head);
- info->type = hwt;
-
- /* Compute the rectangle size and validate that it's plausible. */
- cpumask_copy(&info->cpumask, &mask);
- info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
- if (hwt->is_xdn) {
- rc = check_rectangle(info, &mask);
- if (rc != 0) {
- kfree(info);
- return ERR_PTR(rc);
- }
- }
-
- /*
- * Eliminate cpus that are not part of this Linux client.
- * Note that this allows for configurations that we might not want to
- * support, such as one client on every even cpu, another client on
- * every odd cpu.
- */
- cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask);
-
- /* Confirm it doesn't overlap and add it to the list. */
- spin_lock_irqsave(&hwt->lock, flags);
- list_for_each_entry(iter, &hwt->list, list) {
- if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
- spin_unlock_irqrestore(&hwt->lock, flags);
- kfree(info);
- return ERR_PTR(-EBUSY);
- }
- }
- list_add_tail(&info->list, &hwt->list);
- spin_unlock_irqrestore(&hwt->lock, flags);
-
- /* Set up appropriate hardwalling on all affected cpus. */
- if (hwt->is_xdn)
- hardwall_protect_rectangle(info);
-
- /* Create a /proc/tile/hardwall entry. */
- hardwall_add_proc(info);
-
- return info;
-}
-
-/* Activate a given hardwall on this cpu for this process. */
-static int hardwall_activate(struct hardwall_info *info)
-{
- int cpu;
- unsigned long flags;
- struct task_struct *p = current;
- struct thread_struct *ts = &p->thread;
- struct hardwall_type *hwt;
-
- /* Require a hardwall. */
- if (info == NULL)
- return -ENODATA;
-
- /* Not allowed to activate a hardwall that is being torn down. */
- if (info->teardown_in_progress)
- return -EINVAL;
-
- /*
- * Get our affinity; if we're not bound to this tile uniquely,
- * we can't access the network registers.
- */
- if (cpumask_weight(&p->cpus_allowed) != 1)
- return -EPERM;
-
- /* Make sure we are bound to a cpu assigned to this resource. */
- cpu = smp_processor_id();
- BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
- if (!cpumask_test_cpu(cpu, &info->cpumask))
- return -EINVAL;
-
- /* If we are already bound to this hardwall, it's a no-op. */
- hwt = info->type;
- if (ts->hardwall[hwt->index].info) {
- BUG_ON(ts->hardwall[hwt->index].info != info);
- return 0;
- }
-
- /* Success! This process gets to use the resource on this cpu. */
- ts->hardwall[hwt->index].info = info;
- spin_lock_irqsave(&hwt->lock, flags);
- list_add(&ts->hardwall[hwt->index].list, &info->task_head);
- spin_unlock_irqrestore(&hwt->lock, flags);
- grant_hardwall_mpls(hwt);
- printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
- p->pid, p->comm, hwt->name, cpu);
- return 0;
-}
-
-/*
- * Deactivate a task's hardwall. Must hold lock for hardwall_type.
- * This method may be called from exit_thread(), so we don't want to
- * rely on too many fields of struct task_struct still being valid.
- * We assume the cpus_allowed, pid, and comm fields are still valid.
- */
-static void _hardwall_deactivate(struct hardwall_type *hwt,
- struct task_struct *task)
-{
- struct thread_struct *ts = &task->thread;
-
- if (cpumask_weight(&task->cpus_allowed) != 1) {
- pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
- task->pid, task->comm, hwt->name,
- cpumask_weight(&task->cpus_allowed));
- BUG();
- }
-
- BUG_ON(ts->hardwall[hwt->index].info == NULL);
- ts->hardwall[hwt->index].info = NULL;
- list_del(&ts->hardwall[hwt->index].list);
- if (task == current)
- restrict_hardwall_mpls(hwt);
-}
-
-/* Deactivate a task's hardwall. */
-static int hardwall_deactivate(struct hardwall_type *hwt,
- struct task_struct *task)
-{
- unsigned long flags;
- int activated;
-
- spin_lock_irqsave(&hwt->lock, flags);
- activated = (task->thread.hardwall[hwt->index].info != NULL);
- if (activated)
- _hardwall_deactivate(hwt, task);
- spin_unlock_irqrestore(&hwt->lock, flags);
-
- if (!activated)
- return -EINVAL;
-
- printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
- task->pid, task->comm, hwt->name, raw_smp_processor_id());
- return 0;
-}
-
-void hardwall_deactivate_all(struct task_struct *task)
-{
- int i;
- for (i = 0; i < HARDWALL_TYPES; ++i)
- if (task->thread.hardwall[i].info)
- hardwall_deactivate(&hardwall_types[i], task);
-}
-
-/* Stop the switch before draining the network. */
-static void stop_xdn_switch(void *arg)
-{
-#if !CHIP_HAS_REV1_XDN()
- /* Freeze the switch and the demux. */
- __insn_mtspr(SPR_UDN_SP_FREEZE,
- SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
- SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
- SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
-#else
- /*
- * Drop all packets bound for the core or off the edge.
- * We rely on the normal hardwall protection setup code
- * to have set the low four bits to trigger firewall interrupts,
- * and shift those bits up to trigger "drop on send" semantics,
- * plus adding "drop on send to core" for all switches.
- * In practice it seems the switches latch the DIRECTION_PROTECT
- * SPR so they won't start dropping if they're already
- * delivering the last message to the core, but it doesn't
- * hurt to enable it here.
- */
- struct hardwall_type *hwt = arg;
- unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
- mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
-#endif
-}
-
-static void empty_xdn_demuxes(struct hardwall_type *hwt)
-{
-#ifndef __tilepro__
- if (hwt->is_idn) {
- while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
- (void) __tile_idn0_receive();
- while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
- (void) __tile_idn1_receive();
- return;
- }
-#endif
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
- (void) __tile_udn0_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
- (void) __tile_udn1_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
- (void) __tile_udn2_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
- (void) __tile_udn3_receive();
-}
-
-/* Drain all the state from a stopped switch. */
-static void drain_xdn_switch(void *arg)
-{
- struct hardwall_info *info = arg;
- struct hardwall_type *hwt = info->type;
-
-#if CHIP_HAS_REV1_XDN()
- /*
- * The switches have been configured to drop any messages
- * destined for cores (or off the edge of the rectangle).
- * But the current message may continue to be delivered,
- * so we wait until all the cores have finished any pending
- * messages before we stop draining.
- */
- int pending = mfspr_XDN(hwt, PENDING);
- while (pending--) {
- empty_xdn_demuxes(hwt);
- if (hwt->is_idn)
- __tile_idn_send(0);
- else
- __tile_udn_send(0);
- }
- atomic_dec(&info->xdn_pending_count);
- while (atomic_read(&info->xdn_pending_count))
- empty_xdn_demuxes(hwt);
-#else
- int i;
- int from_tile_words, ca_count;
-
- /* Empty out the 5 switch point fifos. */
- for (i = 0; i < 5; i++) {
- int words, j;
- __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
- words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
- for (j = 0; j < words; j++)
- (void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
- BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
- }
-
- /* Dump out the 3 word fifo at top. */
- from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
- for (i = 0; i < from_tile_words; i++)
- (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);
-
- /* Empty out demuxes. */
- empty_xdn_demuxes(hwt);
-
- /* Empty out catch all. */
- ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
- for (i = 0; i < ca_count; i++)
- (void) __insn_mfspr(SPR_UDN_CA_DATA);
- BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);
-
- /* Clear demux logic. */
- __insn_mtspr(SPR_UDN_DEMUX_CTL, 1);
-
- /*
- * Write switch state; experimentation indicates that 0xc3000
- * is an idle switch point.
- */
- for (i = 0; i < 5; i++) {
- __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
- __insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
- }
-#endif
-}
-
-/* Reset random XDN state registers at boot up and during hardwall teardown. */
-static void reset_xdn_network_state(struct hardwall_type *hwt)
-{
- if (hwt->disabled)
- return;
-
- /* Clear out other random registers so we have a clean slate. */
- mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
- mtspr_XDN(hwt, AVAIL_EN, 0);
- mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);
-
-#if !CHIP_HAS_REV1_XDN()
- /* Reset UDN coordinates to their standard value */
- {
- unsigned int cpu = smp_processor_id();
- unsigned int x = cpu_x(cpu);
- unsigned int y = cpu_y(cpu);
- __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
- }
-
- /* Set demux tags to predefined values and enable them. */
- __insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
- __insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
- __insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
- __insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
- __insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
-
- /* Set other rev0 random registers to a clean state. */
- __insn_mtspr(SPR_UDN_REFILL_EN, 0);
- __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
- __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
-
- /* Start the switch and demux. */
- __insn_mtspr(SPR_UDN_SP_FREEZE, 0);
-#endif
-}
-
-void reset_network_state(void)
-{
- reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
-#ifndef __tilepro__
- reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
-#endif
-}
-
-/* Restart an XDN switch after draining. */
-static void restart_xdn_switch(void *arg)
-{
- struct hardwall_type *hwt = arg;
-
-#if CHIP_HAS_REV1_XDN()
- /* One last drain step to avoid races with injection and draining. */
- empty_xdn_demuxes(hwt);
-#endif
-
- reset_xdn_network_state(hwt);
-
- /* Disable firewall interrupts. */
- disable_firewall_interrupts(hwt);
-}
-
-/* Last reference to a hardwall is gone, so clear the network. */
-static void hardwall_destroy(struct hardwall_info *info)
-{
- struct task_struct *task;
- struct hardwall_type *hwt;
- unsigned long flags;
-
- /* Make sure this file actually represents a hardwall. */
- if (info == NULL)
- return;
-
- /*
- * Deactivate any remaining tasks. It's possible to race with
- * some other thread that is exiting and hasn't yet called
- * deactivate (when freeing its thread_info), so we carefully
- * deactivate any remaining tasks before freeing the
- * hardwall_info object itself.
- */
- hwt = info->type;
- info->teardown_in_progress = 1;
- spin_lock_irqsave(&hwt->lock, flags);
- list_for_each_entry(task, &info->task_head,
- thread.hardwall[hwt->index].list)
- _hardwall_deactivate(hwt, task);
- spin_unlock_irqrestore(&hwt->lock, flags);
-
- if (hwt->is_xdn) {
- /* Configure the switches for draining the user network. */
- printk(KERN_DEBUG
- "Clearing %s hardwall rectangle %dx%d %d,%d\n",
- hwt->name, info->width, info->height,
- info->ulhc_x, info->ulhc_y);
- on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);
-
- /* Drain the network. */
-#if CHIP_HAS_REV1_XDN()
- atomic_set(&info->xdn_pending_count,
- cpumask_weight(&info->cpumask));
- on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
-#else
- on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
-#endif
-
- /* Restart switch and disable firewall. */
- on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
- }
-
- /* Remove the /proc/tile/hardwall entry. */
- hardwall_remove_proc(info);
-
- /* Now free the hardwall from the list. */
- spin_lock_irqsave(&hwt->lock, flags);
- BUG_ON(!list_empty(&info->task_head));
- list_del(&info->list);
- spin_unlock_irqrestore(&hwt->lock, flags);
- kfree(info);
-}
-
-
-static int hardwall_proc_show(struct seq_file *sf, void *v)
-{
- struct hardwall_info *info = sf->private;
-
- seq_printf(sf, "%*pbl\n", cpumask_pr_args(&info->cpumask));
- return 0;
-}
-
-static int hardwall_proc_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, hardwall_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations hardwall_proc_fops = {
- .open = hardwall_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void hardwall_add_proc(struct hardwall_info *info)
-{
- char buf[64];
- snprintf(buf, sizeof(buf), "%d", info->id);
- proc_create_data(buf, 0444, info->type->proc_dir,
- &hardwall_proc_fops, info);
-}
-
-static void hardwall_remove_proc(struct hardwall_info *info)
-{
- char buf[64];
- snprintf(buf, sizeof(buf), "%d", info->id);
- remove_proc_entry(buf, info->type->proc_dir);
-}
-
-int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *task)
-{
- int i;
- int n = 0;
- for (i = 0; i < HARDWALL_TYPES; ++i) {
- struct hardwall_info *info = task->thread.hardwall[i].info;
- if (info)
- seq_printf(m, "%s: %d\n", info->type->name, info->id);
- }
- return n;
-}
-
-void proc_tile_hardwall_init(struct proc_dir_entry *root)
-{
- int i;
- for (i = 0; i < HARDWALL_TYPES; ++i) {
- struct hardwall_type *hwt = &hardwall_types[i];
- if (hwt->disabled)
- continue;
- if (hardwall_proc_dir == NULL)
- hardwall_proc_dir = proc_mkdir("hardwall", root);
- hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
- }
-}
-
-
-/*
- * Character device support via ioctl/close.
- */
-
-static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
-{
- struct hardwall_info *info = file->private_data;
- int minor = iminor(file->f_mapping->host);
- struct hardwall_type* hwt;
-
- if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
- return -EINVAL;
-
- BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
- BUILD_BUG_ON(HARDWALL_TYPES !=
- sizeof(hardwall_types)/sizeof(hardwall_types[0]));
-
- if (minor < 0 || minor >= HARDWALL_TYPES)
- return -EINVAL;
- hwt = &hardwall_types[minor];
- WARN_ON(info && hwt != info->type);
-
- switch (_IOC_NR(a)) {
- case _HARDWALL_CREATE:
- if (hwt->disabled)
- return -ENOSYS;
- if (info != NULL)
- return -EALREADY;
- info = hardwall_create(hwt, _IOC_SIZE(a),
- (const unsigned char __user *)b);
- if (IS_ERR(info))
- return PTR_ERR(info);
- file->private_data = info;
- return 0;
-
- case _HARDWALL_ACTIVATE:
- return hardwall_activate(info);
-
- case _HARDWALL_DEACTIVATE:
- if (current->thread.hardwall[hwt->index].info != info)
- return -EINVAL;
- return hardwall_deactivate(hwt, current);
-
- case _HARDWALL_GET_ID:
- return info ? info->id : -EINVAL;
-
- default:
- return -EINVAL;
- }
-}
-
-#ifdef CONFIG_COMPAT
-static long hardwall_compat_ioctl(struct file *file,
- unsigned int a, unsigned long b)
-{
- /* Sign-extend the argument so it can be used as a pointer. */
- return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
-}
-#endif
-
-/* The user process closed the file; revoke access to user networks. */
-static int hardwall_flush(struct file *file, fl_owner_t owner)
-{
- struct hardwall_info *info = file->private_data;
- struct task_struct *task, *tmp;
- unsigned long flags;
-
- if (info) {
- /*
- * NOTE: if multiple threads are activated on this hardwall
- * file, the other threads will continue having access to the
- * user network until they are context-switched out and back
- * in again.
- *
- * NOTE: A NULL files pointer means the task is being torn
- * down, so in that case we also deactivate it.
- */
- struct hardwall_type *hwt = info->type;
- spin_lock_irqsave(&hwt->lock, flags);
- list_for_each_entry_safe(task, tmp, &info->task_head,
- thread.hardwall[hwt->index].list) {
- if (task->files == owner || task->files == NULL)
- _hardwall_deactivate(hwt, task);
- }
- spin_unlock_irqrestore(&hwt->lock, flags);
- }
-
- return 0;
-}
-
-/* This hardwall is gone, so destroy it. */
-static int hardwall_release(struct inode *inode, struct file *file)
-{
- hardwall_destroy(file->private_data);
- return 0;
-}
-
-static const struct file_operations dev_hardwall_fops = {
- .open = nonseekable_open,
- .unlocked_ioctl = hardwall_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = hardwall_compat_ioctl,
-#endif
- .flush = hardwall_flush,
- .release = hardwall_release,
-};
-
-static struct cdev hardwall_dev;
-
-static int __init dev_hardwall_init(void)
-{
- int rc;
- dev_t dev;
-
- rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
- if (rc < 0)
- return rc;
- cdev_init(&hardwall_dev, &dev_hardwall_fops);
- rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
- if (rc < 0)
- return rc;
-
- return 0;
-}
-late_initcall(dev_hardwall_init);
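
For context on how this now-removed interface was driven: user space opened the per-network character device, created a wall by passing in a cpumask, pinned itself to a single cpu inside the wall, and then activated. The sketch below is hypothetical: the HARDWALL_CREATE()/HARDWALL_ACTIVATE request macros live in the (also deleted) uapi header, and the /dev/hardwall/udn node name is an assumption, since neither appears in this hunk.

    /* Hypothetical user-space sketch of the create/activate flow. */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm/hardwall.h>  /* assumed: HARDWALL_CREATE(), HARDWALL_ACTIVATE */

    int main(void)
    {
        cpu_set_t mask;
        int fd;

        CPU_ZERO(&mask);
        CPU_SET(0, &mask);     /* a 1x1 "rectangle", purely for illustration */

        fd = open("/dev/hardwall/udn", O_RDONLY);  /* node name assumed */
        if (fd < 0)
            return 1;

        /* hardwall_create() copies the mask in and rejects overlapping walls. */
        if (ioctl(fd, HARDWALL_CREATE(sizeof(mask)), &mask) < 0)
            return 1;

        /* hardwall_activate() returns -EPERM unless bound to exactly one cpu. */
        if (sched_setaffinity(0, sizeof(mask), &mask) < 0)
            return 1;
        if (ioctl(fd, HARDWALL_ACTIVATE) < 0)
            return 1;

        /* ... use the user network; close() runs hardwall_flush/_release. */
        close(fd);
        return 0;
    }
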
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
deleted file mode 100644
index 8d5b40ff2922..000000000000
--- a/arch/tile/kernel/head_32.S
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE startup code.
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/thread_info.h>
-#include <asm/processor.h>
-#include <asm/asm-offsets.h>
-#include <hv/hypervisor.h>
-#include <arch/chip.h>
-#include <arch/spr_def.h>
-
-/*
- * This module contains the entry code for kernel images. It performs the
- * minimal setup needed to call the generic C routines.
- */
-
- __HEAD
-ENTRY(_start)
- /* Notify the hypervisor of what version of the API we want */
- {
- movei r1, TILE_CHIP
- movei r2, TILE_CHIP_REV
- }
- {
- moveli r0, _HV_VERSION_OLD_HV_INIT
- jal _hv_init
- }
- /* Get a reasonable default ASID in r0 */
- {
- move r0, zero
- jal _hv_inquire_asid
- }
- /* Install the default page table */
- {
- moveli r6, lo16(swapper_pgprot - PAGE_OFFSET)
- move r4, r0 /* use starting ASID of range for this page table */
- }
- {
- moveli r0, lo16(swapper_pg_dir - PAGE_OFFSET)
- auli r6, r6, ha16(swapper_pgprot - PAGE_OFFSET)
- }
- {
- lw r2, r6
- addi r6, r6, 4
- }
- {
- lw r3, r6
- auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET)
- }
- {
- finv r6
- move r1, zero /* high 32 bits of CPA is zero */
- }
- {
- moveli lr, lo16(1f)
- moveli r5, CTX_PAGE_FLAG
- }
- {
- auli lr, lr, ha16(1f)
- j _hv_install_context
- }
-1:
-
- /* Get our processor number and save it away in SAVE_K_0. */
- jal _hv_inquire_topology
- mulll_uu r4, r1, r2 /* r1 == y, r2 == width */
- add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */
-
-#ifdef CONFIG_SMP
- /*
- * Load up our per-cpu offset. When the first (master) tile
- * boots, this value is still zero, so we will load boot_pc
- * with start_kernel, and boot_sp with the top of init_stack.
- * The master tile initializes the per-cpu offset array, so that
- * when subsequent (secondary) tiles boot, they will instead load
- * from their per-cpu versions of boot_sp and boot_pc.
- */
- moveli r5, lo16(__per_cpu_offset)
- auli r5, r5, ha16(__per_cpu_offset)
- s2a r5, r4, r5
- lw r5, r5
- bnz r5, 1f
-
- /*
- * Save the width and height to the smp_topology variable
- * for later use.
- */
- moveli r0, lo16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
- auli r0, r0, ha16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
- {
- sw r0, r2
- addi r0, r0, (HV_TOPOLOGY_HEIGHT_OFFSET - HV_TOPOLOGY_WIDTH_OFFSET)
- }
- sw r0, r3
-1:
-#else
- move r5, zero
-#endif
-
- /* Load and go with the correct pc and sp. */
- {
- addli r1, r5, lo16(boot_sp)
- addli r0, r5, lo16(boot_pc)
- }
- {
- auli r1, r1, ha16(boot_sp)
- auli r0, r0, ha16(boot_pc)
- }
- lw r0, r0
- lw sp, r1
- or r4, sp, r4
- mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
- {
- move lr, zero /* stop backtraces in the called function */
- jr r0
- }
- ENDPROC(_start)
-
-__PAGE_ALIGNED_BSS
- .align PAGE_SIZE
-ENTRY(empty_zero_page)
- .fill PAGE_SIZE,1,0
- END(empty_zero_page)
-
- .macro PTE va, cpa, bits1, no_org=0
- .ifeq \no_org
- .org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
- .endif
- .word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
- (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
- .word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
- .endm
-
-__PAGE_ALIGNED_DATA
- .align PAGE_SIZE
-ENTRY(swapper_pg_dir)
- /*
- * All data pages from PAGE_OFFSET to MEM_USER_INTRPT are mapped as
- * VA = PA + PAGE_OFFSET. We remap things with more precise access
- * permissions and more respect for size of RAM later.
- */
- .set addr, 0
- .rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT
- PTE addr + PAGE_OFFSET, addr, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
- (1 << (HV_PTE_INDEX_WRITABLE - 32))
- .set addr, addr + PGDIR_SIZE
- .endr
-
- /* The true text VAs are mapped as VA = PA + MEM_SV_START */
- PTE MEM_SV_START, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
- (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
- .org swapper_pg_dir + PGDIR_SIZE
- END(swapper_pg_dir)
-
- /*
- * Isolate swapper_pgprot to its own cache line, since each cpu
- * starting up will read it using VA-is-PA and local homing.
- * This would otherwise likely conflict with other data on the cache
- * line, once we have set its permanent home in the page tables.
- */
- __INITDATA
- .align CHIP_L2_LINE_SIZE()
-ENTRY(swapper_pgprot)
- PTE 0, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
- (1 << (HV_PTE_INDEX_WRITABLE - 32)), 1
- .align CHIP_L2_LINE_SIZE()
- END(swapper_pgprot)
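
Two details of this 32-bit startup path are easy to miss: the cpu number is linearized as y * width + x, and _start then ORs that number into the low bits of the (aligned) kernel stack top before writing SPR_SYSTEM_SAVE_K_0, so one SPR carries both values; the interrupt vectors in intvec_32.S below split them apart again with masked-merge (mm) instructions. A C sketch of that packing, with an illustrative LOG2_NR_CPU_IDS value and hypothetical helper names:

    #define LOG2_NR_CPU_IDS 6  /* illustrative; the real value is config-derived */

    /* "mulll_uu r4, r1, r2; add r4, r4, r0" in _start */
    static inline unsigned int xy_to_cpu(unsigned int x, unsigned int y,
                                         unsigned int width)
    {
        return y * width + x;
    }

    /* "or r4, sp, r4": valid because the stack top is aligned past the id bits */
    static inline unsigned long pack_save_k_0(unsigned long stack_top,
                                              unsigned int cpu)
    {
        return stack_top | cpu;
    }

    /* "mm r0, r0, zero, LOG2_NR_CPU_IDS, 31" in the interrupt entry path */
    static inline unsigned long save_k_0_to_stack_top(unsigned long v)
    {
        return v & ~((1UL << LOG2_NR_CPU_IDS) - 1);
    }

    /* "mm r20, r20, zero, 0, LOG2_NR_CPU_IDS-1" when loading the per-cpu offset */
    static inline unsigned int save_k_0_to_cpu(unsigned long v)
    {
        return (unsigned int)(v & ((1UL << LOG2_NR_CPU_IDS) - 1));
    }
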
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
deleted file mode 100644
index bd0e12f283f3..000000000000
--- a/arch/tile/kernel/head_64.S
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE startup code.
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/thread_info.h>
-#include <asm/processor.h>
-#include <asm/asm-offsets.h>
-#include <hv/hypervisor.h>
-#include <arch/chip.h>
-#include <arch/spr_def.h>
-
-/* Extract two 32-bit bit values that were read into one register. */
-#ifdef __BIG_ENDIAN__
-#define GET_FIRST_INT(rd, rs) shrsi rd, rs, 32
-#define GET_SECOND_INT(rd, rs) addxi rd, rs, 0
-#else
-#define GET_FIRST_INT(rd, rs) addxi rd, rs, 0
-#define GET_SECOND_INT(rd, rs) shrsi rd, rs, 32
-#endif
-
-/*
- * This module contains the entry code for kernel images. It performs the
- * minimal setup needed to call the generic C routines.
- */
-
- __HEAD
-ENTRY(_start)
- /* Notify the hypervisor of what version of the API we want */
- {
-#if KERNEL_PL == 1 && _HV_VERSION == 13
- /* Support older hypervisors by asking for API version 12. */
- movei r0, _HV_VERSION_OLD_HV_INIT
-#else
- movei r0, _HV_VERSION
-#endif
- movei r1, TILE_CHIP
- }
- {
- movei r2, TILE_CHIP_REV
- movei r3, KERNEL_PL
- }
- jal _hv_init
- /* Get a reasonable default ASID in r0 */
- {
- move r0, zero
- jal _hv_inquire_asid
- }
-
- /*
- * Install the default page table. The relocation required to
- * statically define the table is a bit too complex, so we have
- * to plug in the pointer from the L0 to the L1 table by hand.
- * We only do this on the first cpu to boot, though, since the
- * other CPUs should see a properly-constructed page table.
- */
- {
- GET_FIRST_INT(r2, r0) /* ASID for hv_install_context */
- moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET)
- }
- {
- shl16insli r4, r4, hw0(swapper_pgprot - PAGE_OFFSET)
- }
- {
- ld r1, r4 /* access_pte for hv_install_context */
- }
- {
- moveli r0, hw1_last(.Lsv_data_pmd - PAGE_OFFSET)
- moveli r6, hw1_last(temp_data_pmd - PAGE_OFFSET)
- }
- {
- /* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */
- bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL
- finv r4
- }
- bnez r7, .Lno_write
- {
- shl16insli r0, r0, hw0(.Lsv_data_pmd - PAGE_OFFSET)
- shl16insli r6, r6, hw0(temp_data_pmd - PAGE_OFFSET)
- }
- {
- /* Cut off the low bits of the PT address. */
- shrui r6, r6, HV_LOG2_PAGE_TABLE_ALIGN
- /* Start with our access pte. */
- move r5, r1
- }
- {
- /* Stuff the address into the page table pointer slot of the PTE. */
- bfins r5, r6, HV_PTE_INDEX_PTFN, \
- HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
- }
- {
- /* Store the L0 data PTE. */
- st r0, r5
- addli r6, r6, (temp_code_pmd - temp_data_pmd) >> \
- HV_LOG2_PAGE_TABLE_ALIGN
- }
- {
- addli r0, r0, .Lsv_code_pmd - .Lsv_data_pmd
- bfins r5, r6, HV_PTE_INDEX_PTFN, \
- HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
- }
- /* Store the L0 code PTE. */
- st r0, r5
-
-.Lno_write:
- moveli lr, hw2_last(1f)
- {
- shl16insli lr, lr, hw1(1f)
- moveli r0, hw1_last(swapper_pg_dir - PAGE_OFFSET)
- }
- {
- shl16insli lr, lr, hw0(1f)
- shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
- }
- {
- moveli r3, CTX_PAGE_FLAG
- j _hv_install_context
- }
-1:
-
- /* Install the interrupt base. */
- moveli r0, hw2_last(intrpt_start)
- shl16insli r0, r0, hw1(intrpt_start)
- shl16insli r0, r0, hw0(intrpt_start)
- mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0
-
- /* Get our processor number and save it away in SAVE_K_0. */
- jal _hv_inquire_topology
- {
- GET_FIRST_INT(r5, r1) /* r5 = width */
- GET_SECOND_INT(r4, r0) /* r4 = y */
- }
- {
- GET_FIRST_INT(r6, r0) /* r6 = x */
- mul_lu_lu r4, r4, r5
- }
- {
- add r4, r4, r6 /* r4 == cpu == y*width + x */
- }
-
-#ifdef CONFIG_SMP
- /*
- * Load up our per-cpu offset. When the first (master) tile
- * boots, this value is still zero, so we will load boot_pc
- * with start_kernel, and boot_sp with the top of init_stack.
- * The master tile initializes the per-cpu offset array, so that
- * when subsequent (secondary) tiles boot, they will instead load
- * from their per-cpu versions of boot_sp and boot_pc.
- */
- moveli r5, hw2_last(__per_cpu_offset)
- shl16insli r5, r5, hw1(__per_cpu_offset)
- shl16insli r5, r5, hw0(__per_cpu_offset)
- shl3add r5, r4, r5
- ld r5, r5
- bnez r5, 1f
-
- /*
- * Save the width and height to the smp_topology variable
- * for later use.
- */
- moveli r0, hw2_last(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
- shl16insli r0, r0, hw1(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
- shl16insli r0, r0, hw0(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
- st r0, r1
-1:
-#else
- move r5, zero
-#endif
-
- /* Load and go with the correct pc and sp. */
- {
- moveli r1, hw2_last(boot_sp)
- moveli r0, hw2_last(boot_pc)
- }
- {
- shl16insli r1, r1, hw1(boot_sp)
- shl16insli r0, r0, hw1(boot_pc)
- }
- {
- shl16insli r1, r1, hw0(boot_sp)
- shl16insli r0, r0, hw0(boot_pc)
- }
- {
- add r1, r1, r5
- add r0, r0, r5
- }
- ld r0, r0
- ld sp, r1
- shli r4, r4, CPU_SHIFT
- bfins r4, sp, 0, CPU_SHIFT-1
- mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
- {
- move lr, zero /* stop backtraces in the called function */
- jr r0
- }
- ENDPROC(_start)
-
-__PAGE_ALIGNED_BSS
- .align PAGE_SIZE
-ENTRY(empty_zero_page)
- .fill PAGE_SIZE,1,0
- END(empty_zero_page)
-
- .macro PTE cpa, bits1
- .quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
- HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
- (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
- .endm
-
-__PAGE_ALIGNED_DATA
- .align PAGE_SIZE
-ENTRY(swapper_pg_dir)
- .org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
-.Lsv_data_pmd:
- .quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
-.Lsv_code_pmd:
- .quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + SIZEOF_PGD
- END(swapper_pg_dir)
-
- .align HV_PAGE_TABLE_ALIGN
-ENTRY(temp_data_pmd)
- /*
- * We fill the PAGE_OFFSET pmd with huge pages with
- * VA = PA + PAGE_OFFSET. We remap things with more precise access
- * permissions later.
- */
- .set addr, 0
- .rept PTRS_PER_PMD
- PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
- .set addr, addr + HPAGE_SIZE
- .endr
- .org temp_data_pmd + SIZEOF_PMD
- END(temp_data_pmd)
-
- .align HV_PAGE_TABLE_ALIGN
-ENTRY(temp_code_pmd)
- /*
- * We fill the MEM_SV_START pmd with huge pages with
- * VA = PA + MEM_SV_START. We remap things with more precise access
- * permissions later.
- */
- .set addr, 0
- .rept PTRS_PER_PMD
- PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
- .set addr, addr + HPAGE_SIZE
- .endr
- .org temp_code_pmd + SIZEOF_PMD
- END(temp_code_pmd)
-
- /*
- * Isolate swapper_pgprot to its own cache line, since each cpu
- * starting up will read it using VA-is-PA and local homing.
- * This would otherwise likely conflict with other data on the cache
- * line, once we have set its permanent home in the page tables.
- */
- __INITDATA
- .align CHIP_L2_LINE_SIZE()
-ENTRY(swapper_pgprot)
- .quad HV_PTE_PRESENT | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
- .align CHIP_L2_LINE_SIZE()
- END(swapper_pgprot)
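
The GET_FIRST_INT/GET_SECOND_INT macros defined at the top of this file unpack two 32-bit values that the hypervisor returns in one 64-bit register; which half is "first" depends on byte order, and the little-endian case relies on addxi sign-extending its 32-bit result. An equivalent C sketch (helper names are illustrative only):

    #include <stdint.h>

    static inline int32_t get_first_int(uint64_t rs)
    {
    #ifdef __BIG_ENDIAN__
        return (int32_t)(rs >> 32);    /* shrsi rd, rs, 32 */
    #else
        return (int32_t)rs;            /* addxi rd, rs, 0 */
    #endif
    }

    static inline int32_t get_second_int(uint64_t rs)
    {
    #ifdef __BIG_ENDIAN__
        return (int32_t)rs;            /* addxi rd, rs, 0 */
    #else
        return (int32_t)(rs >> 32);    /* shrsi rd, rs, 32 */
    #endif
    }
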
diff --git a/arch/tile/kernel/hvglue.S b/arch/tile/kernel/hvglue.S
deleted file mode 100644
index 70c661448638..000000000000
--- a/arch/tile/kernel/hvglue.S
+++ /dev/null
@@ -1,76 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Hypervisor call vector addresses; see <hv/hypervisor.h> */
-.macro gensym sym, val, size
-.org \val
-.global _\sym
-.type _\sym,function
-_\sym:
-.size _\sym,\size
-#ifndef CONFIG_TILE_HVGLUE_TRACE
-.globl \sym
-.set \sym,_\sym
-#endif
-.endm
-
-.section .hvglue,"x",@nobits
-.align 8
-gensym hv_init, 0x20, 32
-gensym hv_install_context, 0x40, 32
-gensym hv_sysconf, 0x60, 32
-gensym hv_get_rtc, 0x80, 32
-gensym hv_set_rtc, 0xa0, 32
-gensym hv_flush_asid, 0xc0, 32
-gensym hv_flush_page, 0xe0, 32
-gensym hv_flush_pages, 0x100, 32
-gensym hv_restart, 0x120, 32
-gensym hv_halt, 0x140, 32
-gensym hv_power_off, 0x160, 32
-gensym hv_inquire_physical, 0x180, 32
-gensym hv_inquire_memory_controller, 0x1a0, 32
-gensym hv_inquire_virtual, 0x1c0, 32
-gensym hv_inquire_asid, 0x1e0, 32
-gensym hv_nanosleep, 0x200, 32
-gensym hv_console_read_if_ready, 0x220, 32
-gensym hv_console_write, 0x240, 32
-gensym hv_downcall_dispatch, 0x260, 32
-gensym hv_inquire_topology, 0x280, 32
-gensym hv_fs_findfile, 0x2a0, 32
-gensym hv_fs_fstat, 0x2c0, 32
-gensym hv_fs_pread, 0x2e0, 32
-gensym hv_physaddr_read64, 0x300, 32
-gensym hv_physaddr_write64, 0x320, 32
-gensym hv_get_command_line, 0x340, 32
-gensym hv_set_caching, 0x360, 32
-gensym hv_bzero_page, 0x380, 32
-gensym hv_register_message_state, 0x3a0, 32
-gensym hv_send_message, 0x3c0, 32
-gensym hv_receive_message, 0x3e0, 32
-gensym hv_inquire_context, 0x400, 32
-gensym hv_start_all_tiles, 0x420, 32
-gensym hv_dev_open, 0x440, 32
-gensym hv_dev_close, 0x460, 32
-gensym hv_dev_pread, 0x480, 32
-gensym hv_dev_pwrite, 0x4a0, 32
-gensym hv_dev_poll, 0x4c0, 32
-gensym hv_dev_poll_cancel, 0x4e0, 32
-gensym hv_dev_preada, 0x500, 32
-gensym hv_dev_pwritea, 0x520, 32
-gensym hv_flush_remote, 0x540, 32
-gensym hv_console_putc, 0x560, 32
-gensym hv_inquire_tiles, 0x580, 32
-gensym hv_confstr, 0x5a0, 32
-gensym hv_reexec, 0x5c0, 32
-gensym hv_set_command_line, 0x5e0, 32
-gensym hv_clear_intr, 0x600, 32
-gensym hv_enable_intr, 0x620, 32
-gensym hv_disable_intr, 0x640, 32
-gensym hv_raise_intr, 0x660, 32
-gensym hv_trigger_ipi, 0x680, 32
-gensym hv_store_mapping, 0x6a0, 32
-gensym hv_inquire_realpa, 0x6c0, 32
-gensym hv_flush_all, 0x6e0, 32
-gensym hv_get_ipi_pte, 0x700, 32
-gensym hv_set_pte_super_shift, 0x720, 32
-gensym hv_console_set_ipi, 0x7e0, 32
-gensym hv_send_nmi, 0x820, 32
-gensym hv_glue_internals, 0x820, 30688
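
The gensym macro gives every hypervisor call a fixed 32-byte slot in the .hvglue section (hv_init at 0x20, hv_install_context at 0x40, and so on), and when CONFIG_TILE_HVGLUE_TRACE is set it withholds the public hv_* alias so the wrappers in hvglue_trace.c can interpose. A sketch of the slot arithmetic this layout implies; the base symbol here is an assumption for illustration, not a real kernel label:

    #define HV_GLUE_SLOT 0x20          /* each gensym entry is 32 bytes */

    extern char __hvglue_base[];       /* assumed label for the section start */

    /* Slot n lives at n * 0x20: hv_init is slot 1, hv_install_context slot 2... */
    static inline void *hv_glue_entry(unsigned int slot)
    {
        return __hvglue_base + (unsigned long)slot * HV_GLUE_SLOT;
    }
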
diff --git a/arch/tile/kernel/hvglue_trace.c b/arch/tile/kernel/hvglue_trace.c
deleted file mode 100644
index add0d71395c6..000000000000
--- a/arch/tile/kernel/hvglue_trace.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-/*
- * Pull in the hypervisor header so we declare all the ABI functions
- * with the underscore versions, then undef the names so that we can
- * provide our own wrapper versions.
- */
-#define hv_init _hv_init
-#define hv_install_context _hv_install_context
-#define hv_sysconf _hv_sysconf
-#define hv_get_rtc _hv_get_rtc
-#define hv_set_rtc _hv_set_rtc
-#define hv_flush_asid _hv_flush_asid
-#define hv_flush_page _hv_flush_page
-#define hv_flush_pages _hv_flush_pages
-#define hv_restart _hv_restart
-#define hv_halt _hv_halt
-#define hv_power_off _hv_power_off
-#define hv_inquire_physical _hv_inquire_physical
-#define hv_inquire_memory_controller _hv_inquire_memory_controller
-#define hv_inquire_virtual _hv_inquire_virtual
-#define hv_inquire_asid _hv_inquire_asid
-#define hv_nanosleep _hv_nanosleep
-#define hv_console_read_if_ready _hv_console_read_if_ready
-#define hv_console_write _hv_console_write
-#define hv_downcall_dispatch _hv_downcall_dispatch
-#define hv_inquire_topology _hv_inquire_topology
-#define hv_fs_findfile _hv_fs_findfile
-#define hv_fs_fstat _hv_fs_fstat
-#define hv_fs_pread _hv_fs_pread
-#define hv_physaddr_read64 _hv_physaddr_read64
-#define hv_physaddr_write64 _hv_physaddr_write64
-#define hv_get_command_line _hv_get_command_line
-#define hv_set_caching _hv_set_caching
-#define hv_bzero_page _hv_bzero_page
-#define hv_register_message_state _hv_register_message_state
-#define hv_send_message _hv_send_message
-#define hv_receive_message _hv_receive_message
-#define hv_inquire_context _hv_inquire_context
-#define hv_start_all_tiles _hv_start_all_tiles
-#define hv_dev_open _hv_dev_open
-#define hv_dev_close _hv_dev_close
-#define hv_dev_pread _hv_dev_pread
-#define hv_dev_pwrite _hv_dev_pwrite
-#define hv_dev_poll _hv_dev_poll
-#define hv_dev_poll_cancel _hv_dev_poll_cancel
-#define hv_dev_preada _hv_dev_preada
-#define hv_dev_pwritea _hv_dev_pwritea
-#define hv_flush_remote _hv_flush_remote
-#define hv_console_putc _hv_console_putc
-#define hv_inquire_tiles _hv_inquire_tiles
-#define hv_confstr _hv_confstr
-#define hv_reexec _hv_reexec
-#define hv_set_command_line _hv_set_command_line
-#define hv_clear_intr _hv_clear_intr
-#define hv_enable_intr _hv_enable_intr
-#define hv_disable_intr _hv_disable_intr
-#define hv_raise_intr _hv_raise_intr
-#define hv_trigger_ipi _hv_trigger_ipi
-#define hv_store_mapping _hv_store_mapping
-#define hv_inquire_realpa _hv_inquire_realpa
-#define hv_flush_all _hv_flush_all
-#define hv_get_ipi_pte _hv_get_ipi_pte
-#define hv_set_pte_super_shift _hv_set_pte_super_shift
-#define hv_console_set_ipi _hv_console_set_ipi
-#define hv_send_nmi _hv_send_nmi
-#include <hv/hypervisor.h>
-#undef hv_init
-#undef hv_install_context
-#undef hv_sysconf
-#undef hv_get_rtc
-#undef hv_set_rtc
-#undef hv_flush_asid
-#undef hv_flush_page
-#undef hv_flush_pages
-#undef hv_restart
-#undef hv_halt
-#undef hv_power_off
-#undef hv_inquire_physical
-#undef hv_inquire_memory_controller
-#undef hv_inquire_virtual
-#undef hv_inquire_asid
-#undef hv_nanosleep
-#undef hv_console_read_if_ready
-#undef hv_console_write
-#undef hv_downcall_dispatch
-#undef hv_inquire_topology
-#undef hv_fs_findfile
-#undef hv_fs_fstat
-#undef hv_fs_pread
-#undef hv_physaddr_read64
-#undef hv_physaddr_write64
-#undef hv_get_command_line
-#undef hv_set_caching
-#undef hv_bzero_page
-#undef hv_register_message_state
-#undef hv_send_message
-#undef hv_receive_message
-#undef hv_inquire_context
-#undef hv_start_all_tiles
-#undef hv_dev_open
-#undef hv_dev_close
-#undef hv_dev_pread
-#undef hv_dev_pwrite
-#undef hv_dev_poll
-#undef hv_dev_poll_cancel
-#undef hv_dev_preada
-#undef hv_dev_pwritea
-#undef hv_flush_remote
-#undef hv_console_putc
-#undef hv_inquire_tiles
-#undef hv_confstr
-#undef hv_reexec
-#undef hv_set_command_line
-#undef hv_clear_intr
-#undef hv_enable_intr
-#undef hv_disable_intr
-#undef hv_raise_intr
-#undef hv_trigger_ipi
-#undef hv_store_mapping
-#undef hv_inquire_realpa
-#undef hv_flush_all
-#undef hv_get_ipi_pte
-#undef hv_set_pte_super_shift
-#undef hv_console_set_ipi
-#undef hv_send_nmi
-
-/*
- * Provide macros based on <linux/syscalls.h> to provide a wrapper
- * function that invokes the same function with an underscore prefix.
- * We can't use the existing __SC_xxx macros because we need to
- * support up to nine arguments rather than up to six, and also this
- * way the file stands alone from possible changes in the
- * implementation of <linux/syscalls.h>.
- */
-#define HV_WRAP0(type, name) \
- type name(void); \
- type name(void) \
- { \
- return _##name(); \
- }
-#define __HV_DECL1(t1, a1) t1 a1
-#define __HV_DECL2(t2, a2, ...) t2 a2, __HV_DECL1(__VA_ARGS__)
-#define __HV_DECL3(t3, a3, ...) t3 a3, __HV_DECL2(__VA_ARGS__)
-#define __HV_DECL4(t4, a4, ...) t4 a4, __HV_DECL3(__VA_ARGS__)
-#define __HV_DECL5(t5, a5, ...) t5 a5, __HV_DECL4(__VA_ARGS__)
-#define __HV_DECL6(t6, a6, ...) t6 a6, __HV_DECL5(__VA_ARGS__)
-#define __HV_DECL7(t7, a7, ...) t7 a7, __HV_DECL6(__VA_ARGS__)
-#define __HV_DECL8(t8, a8, ...) t8 a8, __HV_DECL7(__VA_ARGS__)
-#define __HV_DECL9(t9, a9, ...) t9 a9, __HV_DECL8(__VA_ARGS__)
-#define __HV_PASS1(t1, a1) a1
-#define __HV_PASS2(t2, a2, ...) a2, __HV_PASS1(__VA_ARGS__)
-#define __HV_PASS3(t3, a3, ...) a3, __HV_PASS2(__VA_ARGS__)
-#define __HV_PASS4(t4, a4, ...) a4, __HV_PASS3(__VA_ARGS__)
-#define __HV_PASS5(t5, a5, ...) a5, __HV_PASS4(__VA_ARGS__)
-#define __HV_PASS6(t6, a6, ...) a6, __HV_PASS5(__VA_ARGS__)
-#define __HV_PASS7(t7, a7, ...) a7, __HV_PASS6(__VA_ARGS__)
-#define __HV_PASS8(t8, a8, ...) a8, __HV_PASS7(__VA_ARGS__)
-#define __HV_PASS9(t9, a9, ...) a9, __HV_PASS8(__VA_ARGS__)
-#define HV_WRAPx(x, type, name, ...) \
- type name(__HV_DECL##x(__VA_ARGS__)); \
- type name(__HV_DECL##x(__VA_ARGS__)) \
- { \
- return _##name(__HV_PASS##x(__VA_ARGS__)); \
- }
-#define HV_WRAP1(type, name, ...) HV_WRAPx(1, type, name, __VA_ARGS__)
-#define HV_WRAP2(type, name, ...) HV_WRAPx(2, type, name, __VA_ARGS__)
-#define HV_WRAP3(type, name, ...) HV_WRAPx(3, type, name, __VA_ARGS__)
-#define HV_WRAP4(type, name, ...) HV_WRAPx(4, type, name, __VA_ARGS__)
-#define HV_WRAP5(type, name, ...) HV_WRAPx(5, type, name, __VA_ARGS__)
-#define HV_WRAP6(type, name, ...) HV_WRAPx(6, type, name, __VA_ARGS__)
-#define HV_WRAP7(type, name, ...) HV_WRAPx(7, type, name, __VA_ARGS__)
-#define HV_WRAP8(type, name, ...) HV_WRAPx(8, type, name, __VA_ARGS__)
-#define HV_WRAP9(type, name, ...) HV_WRAPx(9, type, name, __VA_ARGS__)
-
-/* List all the hypervisor API functions. */
-HV_WRAP4(void, hv_init, HV_VersionNumber, interface_version_number,
- int, chip_num, int, chip_rev_num, int, client_pl)
-HV_WRAP1(long, hv_sysconf, HV_SysconfQuery, query)
-HV_WRAP3(int, hv_confstr, HV_ConfstrQuery, query, HV_VirtAddr, buf, int, len)
-#if CHIP_HAS_IPI()
-HV_WRAP3(int, hv_get_ipi_pte, HV_Coord, tile, int, pl, HV_PTE*, pte)
-HV_WRAP3(int, hv_console_set_ipi, int, ipi, int, event, HV_Coord, coord);
-#else
-HV_WRAP1(void, hv_enable_intr, HV_IntrMask, enab_mask)
-HV_WRAP1(void, hv_disable_intr, HV_IntrMask, disab_mask)
-HV_WRAP1(void, hv_clear_intr, HV_IntrMask, clear_mask)
-HV_WRAP1(void, hv_raise_intr, HV_IntrMask, raise_mask)
-HV_WRAP2(HV_Errno, hv_trigger_ipi, HV_Coord, tile, int, interrupt)
-#endif /* !CHIP_HAS_IPI() */
-HV_WRAP3(int, hv_store_mapping, HV_VirtAddr, va, unsigned int, len,
- HV_PhysAddr, pa)
-HV_WRAP2(HV_PhysAddr, hv_inquire_realpa, HV_PhysAddr, cpa, unsigned int, len)
-HV_WRAP0(HV_RTCTime, hv_get_rtc)
-HV_WRAP1(void, hv_set_rtc, HV_RTCTime, time)
-HV_WRAP4(int, hv_install_context, HV_PhysAddr, page_table, HV_PTE, access,
- HV_ASID, asid, __hv32, flags)
-HV_WRAP2(int, hv_set_pte_super_shift, int, level, int, log2_count)
-HV_WRAP0(HV_Context, hv_inquire_context)
-HV_WRAP1(int, hv_flush_asid, HV_ASID, asid)
-HV_WRAP2(int, hv_flush_page, HV_VirtAddr, address, HV_PageSize, page_size)
-HV_WRAP3(int, hv_flush_pages, HV_VirtAddr, start, HV_PageSize, page_size,
- unsigned long, size)
-HV_WRAP1(int, hv_flush_all, int, preserve_global)
-HV_WRAP2(void, hv_restart, HV_VirtAddr, cmd, HV_VirtAddr, args)
-HV_WRAP0(void, hv_halt)
-HV_WRAP0(void, hv_power_off)
-HV_WRAP1(int, hv_reexec, HV_PhysAddr, entry)
-HV_WRAP0(HV_Topology, hv_inquire_topology)
-HV_WRAP3(HV_Errno, hv_inquire_tiles, HV_InqTileSet, set, HV_VirtAddr, cpumask,
- int, length)
-HV_WRAP1(HV_PhysAddrRange, hv_inquire_physical, int, idx)
-HV_WRAP2(HV_MemoryControllerInfo, hv_inquire_memory_controller, HV_Coord, coord,
- int, controller)
-HV_WRAP1(HV_VirtAddrRange, hv_inquire_virtual, int, idx)
-HV_WRAP1(HV_ASIDRange, hv_inquire_asid, int, idx)
-HV_WRAP1(void, hv_nanosleep, int, nanosecs)
-HV_WRAP0(int, hv_console_read_if_ready)
-HV_WRAP1(void, hv_console_putc, int, byte)
-HV_WRAP2(int, hv_console_write, HV_VirtAddr, bytes, int, len)
-HV_WRAP0(void, hv_downcall_dispatch)
-HV_WRAP1(int, hv_fs_findfile, HV_VirtAddr, filename)
-HV_WRAP1(HV_FS_StatInfo, hv_fs_fstat, int, inode)
-HV_WRAP4(int, hv_fs_pread, int, inode, HV_VirtAddr, buf,
- int, length, int, offset)
-HV_WRAP2(unsigned long long, hv_physaddr_read64, HV_PhysAddr, addr,
- HV_PTE, access)
-HV_WRAP3(void, hv_physaddr_write64, HV_PhysAddr, addr, HV_PTE, access,
- unsigned long long, val)
-HV_WRAP2(int, hv_get_command_line, HV_VirtAddr, buf, int, length)
-HV_WRAP2(HV_Errno, hv_set_command_line, HV_VirtAddr, buf, int, length)
-HV_WRAP1(void, hv_set_caching, unsigned long, bitmask)
-HV_WRAP2(void, hv_bzero_page, HV_VirtAddr, va, unsigned int, size)
-HV_WRAP1(HV_Errno, hv_register_message_state, HV_MsgState*, msgstate)
-HV_WRAP4(int, hv_send_message, HV_Recipient *, recips, int, nrecip,
- HV_VirtAddr, buf, int, buflen)
-HV_WRAP3(HV_RcvMsgInfo, hv_receive_message, HV_MsgState, msgstate,
- HV_VirtAddr, buf, int, buflen)
-HV_WRAP0(void, hv_start_all_tiles)
-HV_WRAP2(int, hv_dev_open, HV_VirtAddr, name, __hv32, flags)
-HV_WRAP1(int, hv_dev_close, int, devhdl)
-HV_WRAP5(int, hv_dev_pread, int, devhdl, __hv32, flags, HV_VirtAddr, va,
- __hv32, len, __hv64, offset)
-HV_WRAP5(int, hv_dev_pwrite, int, devhdl, __hv32, flags, HV_VirtAddr, va,
- __hv32, len, __hv64, offset)
-HV_WRAP3(int, hv_dev_poll, int, devhdl, __hv32, events, HV_IntArg, intarg)
-HV_WRAP1(int, hv_dev_poll_cancel, int, devhdl)
-HV_WRAP6(int, hv_dev_preada, int, devhdl, __hv32, flags, __hv32, sgl_len,
- HV_SGL *, sglp, __hv64, offset, HV_IntArg, intarg)
-HV_WRAP6(int, hv_dev_pwritea, int, devhdl, __hv32, flags, __hv32, sgl_len,
- HV_SGL *, sglp, __hv64, offset, HV_IntArg, intarg)
-HV_WRAP9(int, hv_flush_remote, HV_PhysAddr, cache_pa,
- unsigned long, cache_control, unsigned long*, cache_cpumask,
- HV_VirtAddr, tlb_va, unsigned long, tlb_length,
- unsigned long, tlb_pgsize, unsigned long*, tlb_cpumask,
- HV_Remote_ASID*, asids, int, asidcount)
-HV_WRAP3(HV_NMI_Info, hv_send_nmi, HV_Coord, tile, unsigned long, info,
- __hv64, flags)
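
To make the wrapper machinery concrete: expanding HV_WRAP2(int, hv_console_write, HV_VirtAddr, bytes, int, len) through HV_WRAPx, __HV_DECL2, and __HV_PASS2 yields exactly the following, giving the kernel an ordinary C symbol in front of the fixed .hvglue slot so the call can be traced or breakpointed:

    int hv_console_write(HV_VirtAddr bytes, int len);
    int hv_console_write(HV_VirtAddr bytes, int len)
    {
        return _hv_console_write(bytes, len);
    }
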
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
deleted file mode 100644
index 9ff75e3a318a..000000000000
--- a/arch/tile/kernel/intvec_32.S
+++ /dev/null
@@ -1,1906 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Linux interrupt vectors.
- */
-
-#include <linux/linkage.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/unistd.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/irqflags.h>
-#include <asm/atomic_32.h>
-#include <asm/asm-offsets.h>
-#include <hv/hypervisor.h>
-#include <arch/abi.h>
-#include <arch/interrupts.h>
-#include <arch/spr_def.h>
-
-#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
-
-#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
-
- .macro push_reg reg, ptr=sp, delta=-4
- {
- sw \ptr, \reg
- addli \ptr, \ptr, \delta
- }
- .endm
-
- .macro pop_reg reg, ptr=sp, delta=4
- {
- lw \reg, \ptr
- addli \ptr, \ptr, \delta
- }
- .endm
-
- .macro pop_reg_zero reg, zreg, ptr=sp, delta=4
- {
- move \zreg, zero
- lw \reg, \ptr
- addi \ptr, \ptr, \delta
- }
- .endm
-
- .macro push_extra_callee_saves reg
- PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
- push_reg r51, \reg
- push_reg r50, \reg
- push_reg r49, \reg
- push_reg r48, \reg
- push_reg r47, \reg
- push_reg r46, \reg
- push_reg r45, \reg
- push_reg r44, \reg
- push_reg r43, \reg
- push_reg r42, \reg
- push_reg r41, \reg
- push_reg r40, \reg
- push_reg r39, \reg
- push_reg r38, \reg
- push_reg r37, \reg
- push_reg r36, \reg
- push_reg r35, \reg
- push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
- .endm
-
- .macro panic str
- .pushsection .rodata, "a"
-1:
- .asciz "\str"
- .popsection
- {
- moveli r0, lo16(1b)
- }
- {
- auli r0, r0, ha16(1b)
- jal panic
- }
- .endm
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- .pushsection .text.intvec_feedback,"ax"
-intvec_feedback:
- .popsection
-#endif
-
- /*
- * Default interrupt handler.
- *
- * vecnum is where we'll put this code.
- * c_routine is the C routine we'll call.
- *
- * The C routine is passed two arguments:
- * - A pointer to the pt_regs state.
- * - The interrupt vector number.
- *
- * The "processing" argument specifies the code for processing
- * the interrupt. Defaults to "handle_interrupt".
- */
- .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
- .org (\vecnum << 8)
-intvec_\vecname:
- .ifc \vecnum, INT_SWINT_1
- blz TREG_SYSCALL_NR_NAME, sys_cmpxchg
- .endif
-
- /* Temporarily save a register so we have somewhere to work. */
-
- mtspr SPR_SYSTEM_SAVE_K_1, r0
- mfspr r0, SPR_EX_CONTEXT_K_1
-
- /* The cmpxchg code clears sp to force us to reset it here on fault. */
- {
- bz sp, 2f
- andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
- }
-
- .ifc \vecnum, INT_DOUBLE_FAULT
- /*
- * For double-faults from user-space, fall through to the normal
- * register save and stack setup path. Otherwise, it's the
- * hypervisor giving us one last chance to dump diagnostics, and we
- * branch to the kernel_double_fault routine to do so.
- */
- bz r0, 1f
- j _kernel_double_fault
-1:
- .else
- /*
- * If we're coming from user-space, then set sp to the top of
- * the kernel stack. Otherwise, assume sp is already valid.
- */
- {
- bnz r0, 0f
- move r0, sp
- }
- .endif
-
- .ifc \c_routine, do_page_fault
- /*
- * The page_fault handler may be downcalled directly by the
- * hypervisor even when Linux is running and has ICS set.
- *
- * In this case the contents of EX_CONTEXT_K_1 reflect the
- * previous fault and can't be relied on to choose whether or
- * not to reinitialize the stack pointer. So we add a test
- * to see whether SYSTEM_SAVE_K_2 has the high bit set,
- * and if so we don't reinitialize sp, since we must be coming
- * from Linux. (In fact the precise case is !(val & ~1),
- * but any Linux PC has to have the high bit set.)
- *
- * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
- * any path that turns into a downcall to one of our TLB handlers.
- */
- mfspr r0, SPR_SYSTEM_SAVE_K_2
- {
- blz r0, 0f /* high bit in S_S_K_2 is for a PC to use */
- move r0, sp
- }
- .endif
-
-2:
- /*
- * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
- * the current stack top in the higher bits. So we recover
- * our stack top by just masking off the low bits, then
- * point sp at the top aligned address on the actual stack page.
- */
- mfspr r0, SPR_SYSTEM_SAVE_K_0
- mm r0, r0, zero, LOG2_NR_CPU_IDS, 31
-
-0:
- /*
- * Align the stack mod 64 so we can properly predict what
- * cache lines we need to write-hint to reduce memory fetch
- * latency as we enter the kernel. The layout of memory is
- * as follows, with cache line 0 at the lowest VA, and cache
- * line 4 just below the r0 value this "andi" computes.
- * Note that we never write to cache line 4, and we skip
- * cache line 1 for syscalls.
- *
- * cache line 4: ptregs padding (two words)
- * cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad
- * cache line 2: r30...r45
- * cache line 1: r14...r29
- * cache line 0: 2 x frame, r0..r13
- */
-#if STACK_TOP_DELTA != 64
-#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
-#endif
- andi r0, r0, -64
-
- /*
- * Push the first four registers on the stack, so that we can set
- * them to vector-unique values before we jump to the common code.
- *
- * Registers are pushed on the stack as a struct pt_regs,
- * with the sp initially just above the struct, and when we're
- * done, sp points to the base of the struct, minus
- * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
- *
- * This routine saves just the first four registers, plus the
- * stack context so we can do proper backtracing right away,
- * and defers to handle_interrupt to save the rest.
- * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
- */
- addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
- wh64 r0 /* cache line 3 */
- {
- sw r0, lr
- addli r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
- }
- {
- sw r0, sp
- addli sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
- }
- {
- sw sp, r52
- addli sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
- }
- wh64 sp /* cache line 0 */
- {
- sw sp, r1
- addli sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
- }
- {
- sw sp, r2
- addli sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
- }
- {
- sw sp, r3
- addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
- }
- mfspr r0, SPR_EX_CONTEXT_K_0
- .ifc \processing,handle_syscall
- /*
- * Bump the saved PC by one bundle so that when we return, we won't
- * execute the same swint instruction again. We need to do this while
- * we're in the critical section.
- */
- addi r0, r0, 8
- .endif
- {
- sw sp, r0
- addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
- }
- mfspr r0, SPR_EX_CONTEXT_K_1
- {
- sw sp, r0
- addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
- /*
- * Use r0 for syscalls so it's a temporary; use r1 for interrupts
- * so that it gets passed through unchanged to the handler routine.
- * Note that the .if conditional confusingly spans bundles.
- */
- .ifc \processing,handle_syscall
- movei r0, \vecnum
- }
- {
- sw sp, r0
- .else
- movei r1, \vecnum
- }
- {
- sw sp, r1
- .endif
- addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
- }
- mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */
- {
- sw sp, r0
- addi sp, sp, -PTREGS_OFFSET_REG(0) - 4
- }
- {
- sw sp, zero /* write zero into "Next SP" frame pointer */
- addi sp, sp, -4 /* leave SP pointing at bottom of frame */
- }
- .ifc \processing,handle_syscall
- j handle_syscall
- .else
- /*
- * Capture per-interrupt SPR context to registers.
- * We overload the meaning of r3 on this path such that if its bit 31
- * is set, we have to mask all interrupts including NMIs before
- * clearing the interrupt critical section bit.
- * See discussion below at "finish_interrupt_save".
- */
- .ifc \c_routine, do_page_fault
- mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */
- mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
- .else
- .ifc \vecnum, INT_DOUBLE_FAULT
- {
- mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
- movei r3, 0
- }
- .else
- .ifc \c_routine, do_trap
- {
- mfspr r2, GPV_REASON
- movei r3, 0
- }
- .else
- .ifc \c_routine, handle_perf_interrupt
- {
- mfspr r2, PERF_COUNT_STS
- movei r3, -1 /* not used, but set for consistency */
- }
- .else
- .ifc \c_routine, handle_perf_interrupt
- {
- mfspr r2, AUX_PERF_COUNT_STS
- movei r3, -1 /* not used, but set for consistency */
- }
- .else
- movei r3, 0
- .endif
- .endif
- .endif
- .endif
- .endif
- /* Put function pointer in r0 */
- moveli r0, lo16(\c_routine)
- {
- auli r0, r0, ha16(\c_routine)
- j \processing
- }
- .endif
- ENDPROC(intvec_\vecname)
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- .pushsection .text.intvec_feedback,"ax"
- .org (\vecnum << 5)
- FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
- jrp lr
- .popsection
-#endif
-
- .endm
-
-
- /*
- * Save the rest of the registers that we didn't save in the actual
- * vector itself. We can't use r0-r10 inclusive here.
- */
- .macro finish_interrupt_save, function
-
- /* If it's a syscall, save a proper orig_r0, otherwise just zero. */
- PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
- {
- .ifc \function,handle_syscall
- sw r52, r0
- .else
- sw r52, zero
- .endif
- PTREGS_PTR(r52, PTREGS_OFFSET_TP)
- }
-
- /*
- * For ordinary syscalls, we save neither caller- nor callee-
- * save registers, since the syscall invoker doesn't expect the
- * caller-saves to be saved, and the called kernel functions will
- * take care of saving the callee-saves for us.
- *
- * For interrupts we save just the caller-save registers. Saving
- * them is required (since the "caller" can't save them). Again,
- * the called kernel functions will restore the callee-save
- * registers for us appropriately.
- *
- * On return, we normally restore nothing special for syscalls,
- * and just the caller-save registers for interrupts.
- *
- * However, there are some important caveats to all this:
- *
- * - We always save a few callee-save registers to give us
- * some scratchpad registers to carry across function calls.
- *
- * - fork/vfork/etc require us to save all the callee-save
- * registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
- *
- * - We always save r0..r5 and r10 for syscalls, since we need
- * to reload them a bit later for the actual kernel call, and
- * since we might need them for -ERESTARTNOINTR, etc.
- *
- * - Before invoking a signal handler, we save the unsaved
- * callee-save registers so they are visible to the
- * signal handler or any ptracer.
- *
- * - If the unsaved callee-save registers are modified, we set
- * a bit in pt_regs so we know to reload them from pt_regs
- * and not just rely on the kernel function unwinding.
- * (Done for ptrace register writes and SA_SIGINFO handler.)
- */
- {
- sw r52, tp
- PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
- }
- wh64 r52 /* cache line 2 */
- push_reg r33, r52
- push_reg r32, r52
- push_reg r31, r52
- .ifc \function,handle_syscall
- push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
- push_reg TREG_SYSCALL_NR_NAME, r52, \
- PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
- .else
-
- push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
- wh64 r52 /* cache line 1 */
- push_reg r29, r52
- push_reg r28, r52
- push_reg r27, r52
- push_reg r26, r52
- push_reg r25, r52
- push_reg r24, r52
- push_reg r23, r52
- push_reg r22, r52
- push_reg r21, r52
- push_reg r20, r52
- push_reg r19, r52
- push_reg r18, r52
- push_reg r17, r52
- push_reg r16, r52
- push_reg r15, r52
- push_reg r14, r52
- push_reg r13, r52
- push_reg r12, r52
- push_reg r11, r52
- push_reg r10, r52
- push_reg r9, r52
- push_reg r8, r52
- push_reg r7, r52
- push_reg r6, r52
-
- .endif
-
- push_reg r5, r52
- sw r52, r4
-
- /* Load tp with our per-cpu offset. */
-#ifdef CONFIG_SMP
- {
- mfspr r20, SPR_SYSTEM_SAVE_K_0
- moveli r21, lo16(__per_cpu_offset)
- }
- {
- auli r21, r21, ha16(__per_cpu_offset)
- mm r20, r20, zero, 0, LOG2_NR_CPU_IDS-1
- }
- s2a r20, r20, r21
- lw tp, r20
-#else
- move tp, zero
-#endif
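
As a C sketch, the SMP branch above does the following: the cpu number sits in the low LOG2_NR_CPU_IDS bits of SPR_SYSTEM_SAVE_K_0, and "s2a" (shift-left-two-and-add) scales it into the word-sized __per_cpu_offset table. The constant below is illustrative, not the configured kernel value.

    #include <stdint.h>

    #define LOG2_NR_CPU_IDS 6                   /* illustrative value */

    /* tp = __per_cpu_offset[cpu]: "mm" masks out the cpu field,
     * "s2a" scales it by the 4-byte entry size, "lw" loads it. */
    static uint32_t percpu_tp(uint32_t system_save_k_0,
                              const uint32_t *per_cpu_offset)
    {
            uint32_t cpu = system_save_k_0 & ((1u << LOG2_NR_CPU_IDS) - 1);
            return per_cpu_offset[cpu];
    }
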
-
- /*
- * If we will be returning to the kernel, we will need to
- * reset the interrupt masks to the state they had before.
- * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
- * We load flags in r32 here so we can jump to .Lrestore_regs
- * directly after do_page_fault_ics() if necessary.
- */
- mfspr r32, SPR_EX_CONTEXT_K_1
- {
- andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
- PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
- }
- bzt r32, 1f /* zero if from user space */
- IRQS_DISABLED(r32) /* zero if irqs enabled */
-#if PT_FLAGS_DISABLE_IRQ != 1
-# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
-#endif
-1:
- .ifnc \function,handle_syscall
- /* Record the fact that we saved the caller-save registers above. */
- ori r32, r32, PT_FLAGS_CALLER_SAVES
- .endif
- sw r21, r32
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- /*
- * Notify the feedback routines that we were in the
- * appropriate fixed interrupt vector area. Note that we
- * still have ICS set at this point, so we can't invoke any
- * atomic operations or we will panic. The feedback
- * routines internally preserve r0..r10 and r30 up.
- */
- .ifnc \function,handle_syscall
- shli r20, r1, 5
- .else
- moveli r20, INT_SWINT_1 << 5
- .endif
- addli r20, r20, lo16(intvec_feedback)
- auli r20, r20, ha16(intvec_feedback)
- jalr r20
-
- /* And now notify the feedback routines that we are here. */
- FEEDBACK_ENTER(\function)
-#endif
-
- /*
- * we've captured enough state to the stack (including in
- * particular our EX_CONTEXT state) that we can now release
- * the interrupt critical section and replace it with our
- * standard "interrupts disabled" mask value. This allows
- * synchronous interrupts (and profile interrupts) to punch
- * through from this point onwards.
- *
- * If bit 31 of r3 is set during a non-NMI interrupt, we know we
- * are on the path where the hypervisor has punched through our
- * ICS with a page fault, so we call out to do_page_fault_ics()
- * to figure out what to do with it. If the fault was in
- * a user-space atomic op, we unlock the atomic lock, adjust the
- * saved register state a little, and return "zero" in r4,
- * falling through into the normal page-fault interrupt code.
- * If the fault was in a kernel-space atomic operation, then
- * do_page_fault_ics() resolves it itself, returns "one" in r4,
- * and as a result goes directly to restoring registers and iret,
- * without trying to adjust the interrupt masks at all.
- * The do_page_fault_ics() API involves passing and returning
- * a five-word struct (in registers) to avoid writing the
- * save and restore code here.
- */
- .ifc \function,handle_nmi
- IRQ_DISABLE_ALL(r20)
- .else
- .ifnc \function,handle_syscall
- bgezt r3, 1f
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_page_fault_ics
- }
- FEEDBACK_REENTER(\function)
- bzt r4, 1f
- j .Lrestore_regs
-1:
- .endif
- IRQ_DISABLE(r20, r21)
- .endif
- mtspr INTERRUPT_CRITICAL_SECTION, zero
-
- /*
- * Prepare the first 256 stack bytes to be rapidly accessible
- * without having to fetch the background data. We don't really
- * know how far to write-hint, but kernel stacks generally
- * aren't that big, and write-hinting here does take some time.
- */
- addi r52, sp, -64
- {
- wh64 r52
- addi r52, r52, -64
- }
- {
- wh64 r52
- addi r52, r52, -64
- }
- {
- wh64 r52
- addi r52, r52, -64
- }
- wh64 r52
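
In C terms this is a small unrolled loop over the four 64-byte lines just below sp; wh64() below is a hypothetical stand-in for the write-hint instruction, not a real kernel API.

    static void wh64(void *line) { (void)line; } /* stand-in for the insn */

    /* Claim the 256 bytes below sp as write-only cache lines so the
     * kernel doesn't wait on fetching their stale contents. */
    static void prehint_stack(char *sp)
    {
            for (int i = 1; i <= 4; i++)
                    wh64(sp - 64 * i);
    }
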
-
-#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
- .ifnc \function,handle_nmi
- /*
- * We finally have enough state set up to notify the irq
- * tracing code that irqs were disabled on entry to the handler.
- * The TRACE_IRQS_OFF call clobbers registers r0-r29.
- * For syscalls, we already have the register state saved away
- * on the stack, so we don't bother to do any register saves here,
- * and later we pop the registers back off the kernel stack.
- * For interrupt handlers, save r0-r3 in callee-saved registers.
- */
- .ifnc \function,handle_syscall
- { move r30, r0; move r31, r1 }
- { move r32, r2; move r33, r3 }
- .endif
- TRACE_IRQS_OFF
-#ifdef CONFIG_CONTEXT_TRACKING
- jal context_tracking_user_exit
-#endif
- .ifnc \function,handle_syscall
- { move r0, r30; move r1, r31 }
- { move r2, r32; move r3, r33 }
- .endif
- .endif
-#endif
-
- .endm
-
- .macro check_single_stepping, kind, not_single_stepping
- /*
- * Check for single-stepping at user privilege level.
- * "kind" can be "normal", "ill", or "syscall".
- * At the end, on fall-through:
- * r29: thread_info->step_state
- * r28: &pt_regs->pc
- * r27: pt_regs->pc
- * r26: thread_info->step_state->buffer
- */
-
- /* Check for single stepping */
- GET_THREAD_INFO(r29)
- {
- /* Get pointer to field holding step state */
- addi r29, r29, THREAD_INFO_STEP_STATE_OFFSET
-
- /* Get pointer to EX1 in register state */
- PTREGS_PTR(r27, PTREGS_OFFSET_EX1)
- }
- {
- /* Get pointer to field holding PC */
- PTREGS_PTR(r28, PTREGS_OFFSET_PC)
-
- /* Load the pointer to the step state */
- lw r29, r29
- }
- /* Load EX1 */
- lw r27, r27
- {
- /* Points to flags */
- addi r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET
-
- /* No single stepping if there is no step state structure */
- bzt r29, \not_single_stepping
- }
- {
- /* mask off ICS and any other high bits */
- andi r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK
-
- /* Load pointer to single step instruction buffer */
- lw r26, r29
- }
- /* Check priv state */
- bnz r27, \not_single_stepping
-
- /* Get flags */
- lw r22, r23
- {
- /* Branch if single-step mode not enabled */
- bbnst r22, \not_single_stepping
-
- /* Clear enabled flag */
- andi r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED
- }
- .ifc \kind,normal
- {
- /* Load PC */
- lw r27, r28
-
- /* Point to the entry containing the original PC */
- addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
- }
- {
- /* Disable single stepping flag */
- sw r23, r22
- }
- {
- /* Get the original pc */
- lw r24, r24
-
- /* See if the PC is at the start of the single step buffer */
- seq r25, r26, r27
- }
- /*
- * NOTE: it is really expected that the PC be in the single step buffer
- * at this point
- */
- bzt r25, \not_single_stepping
-
- /* Restore the original PC */
- sw r28, r24
- .else
- .ifc \kind,syscall
- {
- /* Load PC */
- lw r27, r28
-
- /* Point to the entry containing the next PC */
- addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
- }
- {
- /* Compute the expected stop PC: one bundle past the buffer start */
- addi r26, r26, 8
-
- /* Disable single stepping flag */
- sw r23, r22
- }
- {
- /* Get the next pc */
- lw r24, r24
-
- /*
- * See if the PC is one bundle past the start of the
- * single step buffer
- */
- seq r25, r26, r27
- }
- {
- /*
- * NOTE: it is really expected that the PC be in the
- * single step buffer at this point
- */
- bzt r25, \not_single_stepping
- }
- /* Set to the next PC */
- sw r28, r24
- .else
- {
- /* Point to 3rd bundle in buffer */
- addi r25, r26, 16
-
- /* Load PC */
- lw r27, r28
- }
- {
- /* Disable single stepping flag */
- sw r23, r22
-
- /* See if the PC is in the single step buffer */
- slte_u r24, r26, r27
- }
- {
- slte_u r25, r27, r25
-
- /*
- * NOTE: it is really expected that the PC be in the
- * single step buffer at this point
- */
- bzt r24, \not_single_stepping
- }
- bzt r25, \not_single_stepping
- .endif
- .endif
- .endm
-
- /*
- * Redispatch a downcall.
- */
- .macro dc_dispatch vecnum, vecname
- .org (\vecnum << 8)
-intvec_\vecname:
- j _hv_downcall_dispatch
- ENDPROC(intvec_\vecname)
- .endm
-
- /*
- * Common code for most interrupts. The C function we're eventually
- * going to is in r0, and the faultnum is in r1; the original
- * values for those registers are on the stack.
- */
- .pushsection .text.handle_interrupt,"ax"
-handle_interrupt:
- finish_interrupt_save handle_interrupt
-
- /*
- * Check whether we are single-stepping in user mode. If so, then
- * we need to restore the PC.
- */
-
- check_single_stepping normal, .Ldispatch_interrupt
-.Ldispatch_interrupt:
-
- /* Jump to the C routine; it should enable irqs as soon as possible. */
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_interrupt)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(handle_interrupt)
-
-/*
- * This routine takes a boolean in r30 indicating if this is an NMI.
- * If so, we also expect a boolean in r31 indicating whether to
- * re-enable the oprofile interrupts.
- *
- * Note that .Lresume_userspace is jumped to directly in several
- * places, and we need to make sure r30 is set correctly in those
- * callers as well.
- */
-STD_ENTRY(interrupt_return)
- /* If we're resuming to kernel space, don't check thread flags. */
- {
- bnz r30, .Lrestore_all /* NMIs don't special-case user-space */
- PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
- }
- lw r29, r29
- andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
- bzt r29, .Lresume_userspace
-
-#ifdef CONFIG_PREEMPT
- /* Returning to kernel space. Check if we need preemption. */
- GET_THREAD_INFO(r29)
- addli r28, r29, THREAD_INFO_FLAGS_OFFSET
- {
- lw r28, r28
- addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
- }
- {
- andi r28, r28, _TIF_NEED_RESCHED
- lw r29, r29
- }
- bzt r28, 1f
- bnz r29, 1f
- /* Disable interrupts explicitly for preemption. */
- IRQ_DISABLE(r20,r21)
- TRACE_IRQS_OFF
- jal preempt_schedule_irq
- FEEDBACK_REENTER(interrupt_return)
-1:
-#endif
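
The preempt test above reduces to a two-condition check, roughly as in the C below. The flag bit and the structure are stand-ins for the asm-offsets values (_TIF_NEED_RESCHED, THREAD_INFO_*_OFFSET), and the stub stands in for preempt_schedule_irq().

    #define TIF_NEED_RESCHED_SKETCH (1u << 1)   /* illustrative bit */

    struct ti_sketch {
            unsigned long flags;                /* THREAD_INFO_FLAGS_OFFSET */
            int preempt_count;                  /* THREAD_INFO_PREEMPT_COUNT_OFFSET */
    };

    static void preempt_schedule_irq_stub(void) { }

    /* Reschedule only when NEED_RESCHED is set and preemption is not
     * disabled; otherwise fall through to the idle-nap check. */
    static void maybe_preempt(struct ti_sketch *ti)
    {
            if ((ti->flags & TIF_NEED_RESCHED_SKETCH) && ti->preempt_count == 0)
                    preempt_schedule_irq_stub();
    }
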
-
- /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
- {
- PTREGS_PTR(r29, PTREGS_OFFSET_PC)
- moveli r27, lo16(_cpu_idle_nap)
- }
- {
- lw r28, r29
- auli r27, r27, ha16(_cpu_idle_nap)
- }
- {
- seq r27, r27, r28
- }
- {
- bbns r27, .Lrestore_all
- addi r28, r28, 8
- }
- sw r29, r28
- j .Lrestore_all
-
-.Lresume_userspace:
- FEEDBACK_REENTER(interrupt_return)
-
- /*
- * Disable interrupts so as to make sure we don't
- * miss an interrupt that sets any of the thread flags (like
- * need_resched or sigpending) between sampling and the iret.
- * Routines like schedule() or do_signal() may re-enable
- * interrupts before returning.
- */
- IRQ_DISABLE(r20, r21)
- TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
-
- /*
- * See if there are any work items (including single-shot items)
- * to do. If so, save the callee-save registers to pt_regs
- * and then dispatch to C code.
- */
- GET_THREAD_INFO(r21)
- {
- addi r22, r21, THREAD_INFO_FLAGS_OFFSET
- moveli r20, lo16(_TIF_ALLWORK_MASK)
- }
- {
- lw r22, r22
- auli r20, r20, ha16(_TIF_ALLWORK_MASK)
- }
- and r1, r22, r20
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- bzt r1, .Lrestore_all
- }
- push_extra_callee_saves r0
- jal prepare_exit_to_usermode
-
- /*
- * In the NMI case we omit the call to single_process_check_nohz,
- * which normally checks to see if we should start or stop the
- * scheduler tick, because we can't call arbitrary Linux code from
- * an NMI context.
- * We always call the homecache TLB deferral code to re-trigger
- * the deferral mechanism.
- *
- * The other chunk of responsibility this code has is to reset the
- * interrupt masks appropriately to restore irqs and NMIs. We have
- * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
- * lockdep-type stuff, but we can't set ICS until afterwards, since
- * ICS can only be used in very tight chunks of code to avoid
- * tripping over various assertions that it is off.
- *
- * (There is what looks like a window of vulnerability here since
- * we might take a profile interrupt between the two SPR writes
- * that set the mask, but since we write the low SPR word first,
- * and our interrupt entry code checks the low SPR word, any
- * profile interrupt will actually disable interrupts in both SPRs
- * before returning, which is OK.)
- */
-.Lrestore_all:
- PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
- {
- lw r0, r0
- PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
- }
- {
- andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
- lw r32, r32
- }
- bnz r0, 1f
- j 2f
-#if PT_FLAGS_DISABLE_IRQ != 1
-# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below
-#endif
-1: bbnst r32, 2f
- IRQ_DISABLE(r20,r21)
- TRACE_IRQS_OFF
- movei r0, 1
- mtspr INTERRUPT_CRITICAL_SECTION, r0
- bzt r30, .Lrestore_regs
- j 3f
-2: TRACE_IRQS_ON
- movei r0, 1
- mtspr INTERRUPT_CRITICAL_SECTION, r0
- IRQ_ENABLE(r20, r21)
- bzt r30, .Lrestore_regs
-3:
-
- /* We are relying on INT_PERF_COUNT at 33, and AUX_PERF_COUNT at 48 */
- {
- moveli r0, lo16(1 << (INT_PERF_COUNT - 32))
- bz r31, .Lrestore_regs
- }
- auli r0, r0, ha16(1 << (INT_AUX_PERF_COUNT - 32))
- mtspr SPR_INTERRUPT_MASK_RESET_K_1, r0
-
- /*
- * We now commit to returning from this interrupt, since we will be
- * doing things like setting EX_CONTEXT SPRs and unwinding the stack
- * frame. No calls should be made to any other code after this point.
- * This code should only be entered with ICS set.
- * r32 must still be set to ptregs.flags.
- * We launch loads to each cache line separately first, so we can
- * get some parallelism out of the memory subsystem.
- * We start zeroing caller-saved registers throughout, since
- * that will save some cycles if this turns out to be a syscall.
- */
-.Lrestore_regs:
- FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */
-
- /*
- * Rotate so we have one high bit and one low bit to test.
- * - low bit says whether to restore all the callee-saved registers,
- * or just r30-r33, and r52 up.
- * - high bit (i.e. sign bit) says whether to restore all the
- * caller-saved registers, or just r0.
- */
-#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
-# error Rotate trick does not work :-)
-#endif
- {
- rli r20, r32, 30
- PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
- }
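
The rotate trick is compact enough to misread, so here it is in C: rotating the flags word left by 30 bits puts PT_FLAGS_CALLER_SAVES (bit 1) in the sign bit, testable with "blzt", and PT_FLAGS_RESTORE_REGS (bit 2) in bit 0, testable with "bbs".

    #include <stdint.h>

    #define PT_FLAGS_CALLER_SAVES 2
    #define PT_FLAGS_RESTORE_REGS 4

    static uint32_t rotl32(uint32_t x, unsigned int n)
    {
            return (x << n) | (x >> (32 - n));
    }

    static void classify_flags(uint32_t flags)
    {
            uint32_t r20 = rotl32(flags, 30);       /* rli r20, r32, 30 */
            int restore_callees = r20 & 1;          /* bbs r20, ...     */
            int restore_callers = (int32_t)r20 < 0; /* blzt r20, ...    */
            (void)restore_callees;
            (void)restore_callers;
    }
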
-
- /*
- * Load cache lines 0, 2, and 3 in that order, then use
- * the last loaded value, which makes it likely that the other
- * cache lines have also loaded, at which point we should be
- * able to safely read all the remaining words on those cache
- * lines without waiting for the memory subsystem.
- */
- pop_reg_zero r0, r28, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
- pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
- pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
- pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
- {
- mtspr SPR_EX_CONTEXT_K_0, r21
- move r5, zero
- }
- {
- mtspr SPR_EX_CONTEXT_K_1, lr
- andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
- }
-
- /* Restore callee-saveds that we actually use. */
- pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52)
- pop_reg_zero r31, r7
- pop_reg_zero r32, r8
- pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)
-
- /*
- * If we modified other callee-saveds, restore them now.
- * This is rare, but could happen via ptrace or a signal handler.
- */
- {
- move r10, zero
- bbs r20, .Lrestore_callees
- }
-.Lcontinue_restore_regs:
-
- /* Check if we're returning from a syscall. */
- {
- move r11, zero
- blzt r20, 1f /* no, so go restore caller-save registers */
- }
-
- /*
- * Check if we're returning to userspace.
- * Note that if we're not, we don't worry about zeroing everything.
- */
- {
- addli sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
- bnz lr, .Lkernel_return
- }
-
- /*
- * On return from syscall, we've restored r0 from pt_regs, but we
- * clear the remainder of the caller-saved registers. We could
- * restore the syscall arguments, but there's not much point;
- * clearing them ensures user programs aren't relying on the
- * caller-saves, and it avoids leaking kernel pointers into
- * userspace.
- */
- pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
- pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
- {
- lw sp, sp
- move r14, zero
- move r15, zero
- }
- { move r16, zero; move r17, zero }
- { move r18, zero; move r19, zero }
- { move r20, zero; move r21, zero }
- { move r22, zero; move r23, zero }
- { move r24, zero; move r25, zero }
- { move r26, zero; move r27, zero }
-
- /* Set r1 to errno if we are returning an error, otherwise zero. */
- {
- moveli r29, 4096
- sub r1, zero, r0
- }
- slt_u r29, r1, r29
- {
- mnz r1, r29, r1
- move r29, zero
- }
- iret
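
The errno encoding above compresses to a one-liner in C: a syscall return value in [-4095, -1] is reported as a positive errno in r1; anything else yields zero.

    /* "sub r1, zero, r0" negates r0; "slt_u" plus "mnz" keep the
     * result only if it is in errno range (1..4095), else zero it. */
    static unsigned long r1_from_r0(long r0)
    {
            unsigned long r1 = (unsigned long)-r0;
            return r1 < 4096 ? r1 : 0;
    }
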
-
- /*
- * Not a syscall, so restore caller-saved registers.
- * First kick off a load for cache line 1, which we're touching
- * for the first time here.
- */
- .align 64
-1: pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29)
- pop_reg r1
- pop_reg r2
- pop_reg r3
- pop_reg r4
- pop_reg r5
- pop_reg r6
- pop_reg r7
- pop_reg r8
- pop_reg r9
- pop_reg r10
- pop_reg r11
- pop_reg r12
- pop_reg r13
- pop_reg r14
- pop_reg r15
- pop_reg r16
- pop_reg r17
- pop_reg r18
- pop_reg r19
- pop_reg r20
- pop_reg r21
- pop_reg r22
- pop_reg r23
- pop_reg r24
- pop_reg r25
- pop_reg r26
- pop_reg r27
- pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
- /* r29 already restored above */
- bnz lr, .Lkernel_return
- pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
- pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
- lw sp, sp
- iret
-
- /*
- * We can't restore tp when in kernel mode, since a thread might
- * have migrated from another cpu and brought a stale tp value.
- */
-.Lkernel_return:
- pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
- lw sp, sp
- iret
-
- /* Restore callee-saved registers from r34 to r51. */
-.Lrestore_callees:
- addli sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
- pop_reg r34
- pop_reg r35
- pop_reg r36
- pop_reg r37
- pop_reg r38
- pop_reg r39
- pop_reg r40
- pop_reg r41
- pop_reg r42
- pop_reg r43
- pop_reg r44
- pop_reg r45
- pop_reg r46
- pop_reg r47
- pop_reg r48
- pop_reg r49
- pop_reg r50
- pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
- j .Lcontinue_restore_regs
- STD_ENDPROC(interrupt_return)
-
- /*
- * Some interrupts don't check for single stepping
- */
- .pushsection .text.handle_interrupt_no_single_step,"ax"
-handle_interrupt_no_single_step:
- finish_interrupt_save handle_interrupt_no_single_step
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_interrupt_no_single_step)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(handle_interrupt_no_single_step)
-
- /*
- * "NMI" interrupts mask ALL interrupts before calling the
- * handler, and don't check thread flags, etc., on the way
- * back out. In general, the only things we do here for NMIs
- * are the register save/restore, fixing the PC if we were
- * doing single step, and the dataplane kernel-TLB management.
- * We don't (for example) deal with start/stop of the sched tick.
- */
- .pushsection .text.handle_nmi,"ax"
-handle_nmi:
- finish_interrupt_save handle_nmi
- check_single_stepping normal, .Ldispatch_nmi
-.Ldispatch_nmi:
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_nmi)
- {
- movei r30, 1
- seq r31, r0, zero
- }
- j interrupt_return
- STD_ENDPROC(handle_nmi)
-
- /*
- * Parallel code for syscalls to handle_interrupt.
- */
- .pushsection .text.handle_syscall,"ax"
-handle_syscall:
- finish_interrupt_save handle_syscall
-
- /*
- * Check whether we are single-stepping in user mode. If so, then
- * we need to restore the PC.
- */
- check_single_stepping syscall, .Ldispatch_syscall
-.Ldispatch_syscall:
-
- /* Enable irqs. */
- TRACE_IRQS_ON
- IRQ_ENABLE(r20, r21)
-
- /* Bump the counter for syscalls made on this tile. */
- moveli r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
- auli r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
- add r20, r20, tp
- lw r21, r20
- addi r21, r21, 1
- {
- sw r20, r21
- GET_THREAD_INFO(r31)
- }
-
- /* Trace syscalls, if requested. */
- addi r31, r31, THREAD_INFO_FLAGS_OFFSET
- lw r30, r31
- andi r30, r30, _TIF_SYSCALL_TRACE
- bzt r30, .Lrestore_syscall_regs
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_syscall_trace_enter
- }
- FEEDBACK_REENTER(handle_syscall)
- blz r0, .Lsyscall_sigreturn_skip
-
- /*
- * We always reload our registers from the stack at this
- * point. They might be valid, if we didn't build with
- * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
- * doing syscall tracing, but there are enough cases now that it
- * seems simplest just to do the reload unconditionally.
- */
-.Lrestore_syscall_regs:
- PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
- pop_reg r0, r11
- pop_reg r1, r11
- pop_reg r2, r11
- pop_reg r3, r11
- pop_reg r4, r11
- pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
- pop_reg TREG_SYSCALL_NR_NAME, r11
-
- /* Ensure that the syscall number is within the legal range. */
- moveli r21, __NR_syscalls
- {
- slt_u r21, TREG_SYSCALL_NR_NAME, r21
- moveli r20, lo16(sys_call_table)
- }
- {
- bbns r21, .Linvalid_syscall
- auli r20, r20, ha16(sys_call_table)
- }
- s2a r20, TREG_SYSCALL_NR_NAME, r20
- lw r20, r20
-
- /* Jump to syscall handler. */
- jalr r20
-.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
-
- /*
- * Write our r0 onto the stack so it gets restored instead
- * of whatever the user had there before.
- */
- PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
- sw r29, r0
-
-.Lsyscall_sigreturn_skip:
- FEEDBACK_REENTER(handle_syscall)
-
- /* Do syscall trace again, if requested. */
- lw r30, r31
- andi r30, r30, _TIF_SYSCALL_TRACE
- bzt r30, 1f
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_syscall_trace_exit
- }
- FEEDBACK_REENTER(handle_syscall)
-1: {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
-
-.Linvalid_syscall:
- /* Report an invalid syscall back to the user program */
- {
- PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
- movei r28, -ENOSYS
- }
- sw r29, r28
- {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
- STD_ENDPROC(handle_syscall)
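
The core of handle_syscall is the classic bounds-check-then-indirect-call through sys_call_table ("s2a" scales the syscall number by the 4-byte pointer size). A sketch, with everything except sys_call_table and the range check being illustrative names:

    typedef long (*syscall_fn)(long, long, long, long, long, long);

    extern syscall_fn sys_call_table[];         /* assumed table shape */
    extern unsigned long nr_syscalls;           /* stands in for __NR_syscalls */

    #define ENOSYS_SKETCH 38                    /* illustrative errno value */

    static long dispatch(unsigned long nr, long a0, long a1, long a2,
                         long a3, long a4, long a5)
    {
            if (nr >= nr_syscalls)
                    return -ENOSYS_SKETCH;      /* .Linvalid_syscall path */
            return sys_call_table[nr](a0, a1, a2, a3, a4, a5);
    }
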
-
- /* Return the address for oprofile to suppress in backtraces. */
-STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
- lnk r0
- {
- addli r0, r0, .Lhandle_syscall_link - .
- jrp lr
- }
- STD_ENDPROC(handle_syscall_link_address)
-
-STD_ENTRY(ret_from_fork)
- jal sim_notify_fork
- jal schedule_tail
- FEEDBACK_REENTER(ret_from_fork)
- {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
- STD_ENDPROC(ret_from_fork)
-
-STD_ENTRY(ret_from_kernel_thread)
- jal sim_notify_fork
- jal schedule_tail
- FEEDBACK_REENTER(ret_from_fork)
- {
- move r0, r31
- jalr r30
- }
- FEEDBACK_REENTER(ret_from_kernel_thread)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(ret_from_kernel_thread)
-
- /*
- * Code for ill interrupt.
- */
- .pushsection .text.handle_ill,"ax"
-handle_ill:
- finish_interrupt_save handle_ill
-
- /*
- * Check whether we are single-stepping in user mode. If so, then
- * we need to restore the PC.
- */
- check_single_stepping ill, .Ldispatch_normal_ill
-
- {
- /* See if the PC is the 1st bundle in the buffer */
- seq r25, r27, r26
-
- /* Point to the 2nd bundle in the buffer */
- addi r26, r26, 8
- }
- {
- /* Point to the original pc */
- addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
-
- /* Branch if the PC is the 1st bundle in the buffer */
- bnz r25, 3f
- }
- {
- /* See if the PC is the 2nd bundle of the buffer */
- seq r25, r27, r26
-
- /* Point to the entry containing the next PC */
- addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
- }
- {
- /* Point to flags */
- addi r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET
-
- /* Branch if the PC is not in the second bundle */
- bz r25, 2f
- }
- /* Load flags */
- lw r25, r25
- {
- /*
- * Get the offset for the register to restore
- * Note: the lower bound is 2, so we have implicit scaling by 4.
- * No multiplication of the register number by the size of a register
- * is needed.
- */
- mm r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \
- SINGLESTEP_STATE_TARGET_UB
-
- /* Mask Rewrite_LR */
- andi r25, r25, SINGLESTEP_STATE_MASK_UPDATE
- }
- {
- addi r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET
-
- /* Don't rewrite temp register */
- bz r25, 3f
- }
- {
- /* Get the temp value */
- lw r29, r29
-
- /* Point to where the register is stored */
- add r27, r27, sp
- }
-
- /* Add in the C ABI save area size to the register offset */
- addi r27, r27, C_ABI_SAVE_AREA_SIZE
-
- /* Restore the user's register with the temp value */
- sw r27, r29
- j 3f
-
-2:
- /* Must be in the third bundle */
- addi r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET
-
-3:
- /* set PC and continue */
- lw r26, r24
- {
- sw r28, r26
- GET_THREAD_INFO(r0)
- }
-
- /*
- * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
- * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
- * need to clear it here and can't really impose on all other arches.
- * So what's another write between friends?
- */
-
- addi r1, r0, THREAD_INFO_FLAGS_OFFSET
- {
- lw r2, r1
- addi r0, r0, THREAD_INFO_TASK_OFFSET /* currently a no-op */
- }
- andi r2, r2, ~_TIF_SINGLESTEP
- sw r1, r2
-
- /* Issue a sigtrap */
- {
- lw r0, r0 /* indirect through thread_info to get the task pointer */
- addi r1, sp, C_ABI_SAVE_AREA_SIZE /* put ptregs pointer into r1 */
- }
-
- jal send_sigtrap /* issue a SIGTRAP */
- FEEDBACK_REENTER(handle_ill)
- {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
-
-.Ldispatch_normal_ill:
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_ill)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(handle_ill)
-
-/* Various stub interrupt handlers and syscall handlers */
-
-STD_ENTRY_LOCAL(_kernel_double_fault)
- mfspr r1, SPR_EX_CONTEXT_K_0
- move r2, lr
- move r3, sp
- move r4, r52
- addi sp, sp, -C_ABI_SAVE_AREA_SIZE
- j kernel_double_fault
- STD_ENDPROC(_kernel_double_fault)
-
-STD_ENTRY_LOCAL(bad_intr)
- mfspr r2, SPR_EX_CONTEXT_K_0
- panic "Unhandled interrupt %#x: PC %#lx"
- STD_ENDPROC(bad_intr)
-
-/*
- * Special-case sigreturn to not write r0 to the stack on return.
- * This is technically more efficient, but it also avoids difficulties
- * in the 64-bit OS when handling 32-bit compat code, since we must not
- * sign-extend r0 for the sigreturn return-value case.
- */
-#define PTREGS_SYSCALL_SIGRETURN(x, reg) \
- STD_ENTRY(_##x); \
- addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
- { \
- PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
- j x \
- }; \
- STD_ENDPROC(_##x)
-
-PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
-
-/* Save additional callee-saves to pt_regs and jump to standard function. */
-STD_ENTRY(_sys_clone)
- push_extra_callee_saves r4
- j sys_clone
- STD_ENDPROC(_sys_clone)
-
-/*
- * This entrypoint is taken for the cmpxchg and atomic_update fast
- * swints. We may wish to generalize it to other fast swints at some
- * point, but for now there are just two very similar ones, which
- * keeps the code small and fast.
- *
- * The fast swint code is designed to have a small footprint. It does
- * not save or restore any GPRs, counting on the caller-save registers
- * to be available to it on entry. It does not modify any callee-save
- * registers (including "lr"). It does not check what PL it is being
- * called at, so you'd better not call it other than at PL0.
- * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
- * it ever is necessary to use more registers, be aware.
- *
- * It does not use the stack, but since it might be re-interrupted by
- * a page fault which would assume the stack was valid, it does
- * save/restore the stack pointer and zero it out to make sure it gets reset.
- * Since we always keep interrupts disabled, the hypervisor won't
- * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
- * (other than to advance the PC on return).
- *
- * We have to manually validate the user vs kernel address range
- * (since at PL1 we can read/write both), and for performance reasons
- * we don't allow cmpxchg on the fc000000 memory region, since we only
- * validate that the user address is below PAGE_OFFSET.
- *
- * We place it in the __HEAD section to ensure it is relatively
- * near to the intvec_SWINT_1 code (reachable by a conditional branch).
- *
- * Our use of ATOMIC_LOCK_REG here must match do_page_fault_ics().
- *
- * As we do in lib/atomic_asm_32.S, we bypass a store if the value we
- * would store is the same as the value we just loaded.
- */
- __HEAD
- .align 64
- /* Pad so the much-later jump lands at the start of a cache line. */
- nop
-#if PAGE_SIZE >= 0x10000
- nop
-#endif
-ENTRY(sys_cmpxchg)
-
- /*
- * Save "sp" and set it zero for any possible page fault.
- *
- * HACK: We want to both zero sp and check r0's alignment,
- * so we do both at once. If "sp" becomes nonzero we
- * know r0 is unaligned and branch to the error handler that
- * restores sp, so this is OK.
- *
- * ICS is disabled right now so having a garbage but nonzero
- * sp is OK, since we won't execute any faulting instructions
- * when it is nonzero.
- */
- {
- move r27, sp
- andi sp, r0, 3
- }
-
- /*
- * Get the lock address in ATOMIC_LOCK_REG, and also validate that the
- * address is less than PAGE_OFFSET, since that won't trap at PL1.
- * We only use bits less than PAGE_SHIFT to avoid having to worry
- * about aliasing among multiple mappings of the same physical page,
- * and we ignore the low 3 bits so we have one lock that covers
- * both a cmpxchg64() and a cmpxchg() on either its low or high word.
- * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
- */
-
-#if (PAGE_OFFSET & 0xffff) != 0
-# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
-#endif
-
- {
- /* Check for unaligned input. */
- bnz sp, .Lcmpxchg_badaddr
- auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */
- }
- {
- /*
- * Slide bits into position for 'mm'. We want to ignore
- * the low 3 bits of r0, and consider only the next
- * ATOMIC_HASH_SHIFT bits.
- * Because of C pointer arithmetic, we want to compute this:
- *
- * ((char*)atomic_locks +
- * (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
- *
- * Instead of two shifts we just ">> 1", and use 'mm'
- * to ignore the low and high bits we don't want.
- */
- shri r25, r0, 1
-
- slt_u r23, r0, r23
-
- /*
- * Ensure that the TLB is loaded before we take out the lock.
- * This will start fetching the value all the way into our L1
- * as well (and if it gets modified before we grab the lock,
- * it will be invalidated from our cache before we reload it).
- */
- lw r26, r0
- }
- {
- auli r21, zero, ha16(atomic_locks)
-
- bbns r23, .Lcmpxchg_badaddr
- }
-#if PAGE_SIZE < 0x10000
- /* atomic_locks is page-aligned so for big pages we don't need this. */
- addli r21, r21, lo16(atomic_locks)
-#endif
- {
- /*
- * Insert the hash bits into the page-aligned pointer.
- * ATOMIC_HASH_SHIFT is so big that we don't actually hash
- * the unmasked address bits, as that may cause unnecessary
- * collisions.
- */
- mm ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1
-
- seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
- }
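
In C the lock-address computation is the hash below; as the comment above says, it must agree with __atomic_hashed_lock() in lib/atomic_32.c. The ATOMIC_HASH_SHIFT value here is illustrative. Dropping the low 3 bits is what lets one lock cover a cmpxchg64() and a cmpxchg() on either half of the same 8-byte block.

    #define ATOMIC_HASH_SHIFT_SKETCH 10         /* illustrative value */

    extern int atomic_locks[1 << ATOMIC_HASH_SHIFT_SKETCH];

    /* Same arithmetic the "shri"/"mm" pair above performs. */
    static int *hashed_lock(unsigned long addr)
    {
            unsigned long idx =
                    (addr >> 3) & ((1ul << ATOMIC_HASH_SHIFT_SKETCH) - 1);
            return &atomic_locks[idx];
    }
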
- {
- /* Branch away at this point if we're doing a 64-bit cmpxchg. */
- bbs r23, .Lcmpxchg64
- andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */
- }
- {
- /*
- * We very carefully align the code that actually runs with
- * the lock held (twelve bundles) so that we know it is all in
- * the icache when we start. This instruction (the jump) is
- * at the start of the first cache line, address zero mod 64;
- * we jump to the very end of the second cache line to get that
- * line loaded in the icache, then fall through to issue the tns
- * in the third cache line, at which point it's all cached.
- * Note that this is for performance, not correctness.
- */
- j .Lcmpxchg32_tns
- }
-
-/* Symbol for do_page_fault_ics() to use to compare against the PC. */
-.global __sys_cmpxchg_grab_lock
-__sys_cmpxchg_grab_lock:
-
- /*
- * Perform the actual cmpxchg or atomic_update.
- */
-.Ldo_cmpxchg32:
- {
- lw r21, r0
- seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update
- move r24, r2
- }
- {
- seq r22, r21, r1 /* See if cmpxchg matches. */
- and r25, r21, r1 /* If atomic_update, compute (*mem & mask) */
- }
- {
- or r22, r22, r23 /* Skip compare branch for atomic_update. */
- add r25, r25, r2 /* Compute (*mem & mask) + addend. */
- }
- {
- mvnz r24, r23, r25 /* Use atomic_update value if appropriate. */
- bbns r22, .Lcmpxchg32_nostore
- }
- seq r22, r24, r21 /* Are we storing the value we loaded? */
- bbs r22, .Lcmpxchg32_nostore
- sw r0, r24
-
- /* The following instruction is the start of the second cache line. */
- /* Do slow mtspr here so the following "mf" waits less. */
- {
- move sp, r27
- mtspr SPR_EX_CONTEXT_K_0, r28
- }
- mf
-
- {
- move r0, r21
- sw ATOMIC_LOCK_REG_NAME, zero
- }
- iret
-
- /* Duplicated code here in the case where we don't overlap "mf" */
-.Lcmpxchg32_nostore:
- {
- move r0, r21
- sw ATOMIC_LOCK_REG_NAME, zero
- }
- {
- move sp, r27
- mtspr SPR_EX_CONTEXT_K_0, r28
- }
- iret
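
Both fast swints share the read-modify-write implemented above; its combined semantics, including the store elision, look like this in C. r1 carries "oldval" for cmpxchg or "mask" for atomic_update; r2 carries "newval" or "addend".

    /* The old value is always returned (in r0); the store is skipped
     * when the compare fails or when it would rewrite the same value. */
    static int fast_swint(int *mem, int r1, int r2, int is_atomic_update)
    {
            int old = *mem;                     /* lw r21, r0 */
            int newv = is_atomic_update ? (old & r1) + r2 : r2;
            int store = is_atomic_update || old == r1;

            if (store && newv != old)
                    *mem = newv;                /* sw r0, r24 */
            return old;
    }
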
-
- /*
- * The locking code is the same for 32-bit cmpxchg/atomic_update,
- * and for 64-bit cmpxchg. We provide it as a macro and put
- * it into both versions. We can't share the code literally
- * since it depends on having the right branch-back address.
- */
- .macro cmpxchg_lock, bitwidth
-
- /* Lock; if we succeed, jump back up to the read-modify-write. */
-#ifdef CONFIG_SMP
- tns r21, ATOMIC_LOCK_REG_NAME
-#else
- /*
- * Non-SMP preserves all the lock infrastructure, to keep the
- * code simpler for the interesting (SMP) case. However, we do
- * one small optimization here and in atomic_asm.S, which is
- * to fake out acquiring the actual lock in the atomic_lock table.
- */
- movei r21, 0
-#endif
-
- /* Issue the slow SPR here while the tns result is in flight. */
- mfspr r28, SPR_EX_CONTEXT_K_0
-
- {
- addi r28, r28, 8 /* return to the instruction after the swint1 */
- bzt r21, .Ldo_cmpxchg\bitwidth
- }
- /*
- * The preceding instruction is the last thing that must be
- * hot in the icache before we do the "tns" above.
- */
-
-#ifdef CONFIG_SMP
- /*
- * We failed to acquire the tns lock on our first try. Now use
- * bounded exponential backoff to retry, like __atomic_spinlock().
- */
- {
- moveli r23, 2048 /* maximum backoff time in cycles */
- moveli r25, 32 /* starting backoff time in cycles */
- }
-1: mfspr r26, CYCLE_LOW /* get start point for this backoff */
-2: mfspr r22, CYCLE_LOW /* test to see if we've backed off enough */
- sub r22, r22, r26
- slt r22, r22, r25
- bbst r22, 2b
- {
- shli r25, r25, 1 /* double the backoff; retry the tns */
- tns r21, ATOMIC_LOCK_REG_NAME
- }
- slt r26, r23, r25 /* is the proposed backoff too big? */
- {
- mvnz r25, r26, r23
- bzt r21, .Ldo_cmpxchg\bitwidth
- }
- j 1b
-#endif /* CONFIG_SMP */
- .endm
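
The SMP retry path is bounded exponential backoff, the same policy as __atomic_spinlock(). A sketch, where test_and_set() and cycle_low() are hypothetical stand-ins for the "tns" instruction and the CYCLE_LOW SPR:

    extern int test_and_set(volatile int *lock); /* "tns": returns old value */
    extern unsigned int cycle_low(void);         /* low word of cycle counter */

    static void lock_with_backoff(volatile int *lock)
    {
            unsigned int backoff = 32, cap = 2048; /* cycles */

            while (test_and_set(lock)) {
                    unsigned int start = cycle_low();
                    while (cycle_low() - start < backoff)
                            ;                     /* spin out the window */
                    backoff *= 2;                 /* shli r25, r25, 1 */
                    if (backoff > cap)
                            backoff = cap;        /* slt/mvnz clamp */
            }
    }
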
-
-.Lcmpxchg32_tns:
- /*
- * This is the last instruction on the second cache line.
- * The nop here loads the second line, then we fall through
- * to the tns to load the third line before we take the lock.
- */
- nop
- cmpxchg_lock 32
-
- /*
- * This code is invoked from sys_cmpxchg after most of the
- * preconditions have been checked. We still need to check
- * that r0 is 8-byte aligned, since if it's not we won't
- * actually be atomic. However, ATOMIC_LOCK_REG has the atomic
- * lock pointer and r27/r28 have the saved SP/PC.
- * r23 is holding "r0 & 7" so we can test for alignment.
- * The compare value is in r2/r3; the new value is in r4/r5.
- * On return, we must put the old value in r0/r1.
- */
- .align 64
-.Lcmpxchg64:
- {
- bzt r23, .Lcmpxchg64_tns
- }
- j .Lcmpxchg_badaddr
-
-.Ldo_cmpxchg64:
- {
- lw r21, r0
- addi r25, r0, 4
- }
- {
- lw r1, r25
- }
- seq r26, r21, r2
- {
- bz r26, .Lcmpxchg64_mismatch
- seq r26, r1, r3
- }
- {
- bz r26, .Lcmpxchg64_mismatch
- }
- sw r0, r4
- sw r25, r5
-
- /*
- * The 32-bit path provides optimized "match" and "mismatch"
- * iret paths, but we don't have enough bundles in this cache line
- * to do that, so we just make even the "mismatch" path do an "mf".
- */
-.Lcmpxchg64_mismatch:
- {
- move sp, r27
- mtspr SPR_EX_CONTEXT_K_0, r28
- }
- mf
- {
- move r0, r21
- sw ATOMIC_LOCK_REG_NAME, zero
- }
- iret
-
-.Lcmpxchg64_tns:
- cmpxchg_lock 64
-
-
- /*
- * Reset sp and revector to sys_cmpxchg_badaddr(), which will
- * just raise the appropriate signal and exit. Doing it this
- * way means we don't have to duplicate the code in intvec.S's
- * int_hand macro that locates the top of the stack.
- */
-.Lcmpxchg_badaddr:
- {
- moveli TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr
- move sp, r27
- }
- j intvec_SWINT_1
- ENDPROC(sys_cmpxchg)
- ENTRY(__sys_cmpxchg_end)
-
-
-/* The single-step support may need to read all the registers. */
-int_unalign:
- push_extra_callee_saves r0
- j do_trap
-
-/* Include .intrpt array of interrupt vectors */
- .section ".intrpt", "ax"
-
-#ifndef CONFIG_USE_PMC
-#define handle_perf_interrupt bad_intr
-#endif
-
-#ifndef CONFIG_HARDWALL
-#define do_hardwall_trap bad_intr
-#endif
-
- int_hand INT_ITLB_MISS, ITLB_MISS, \
- do_page_fault, handle_interrupt_no_single_step
- int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr
- int_hand INT_ILL, ILL, do_trap, handle_ill
- int_hand INT_GPV, GPV, do_trap
- int_hand INT_SN_ACCESS, SN_ACCESS, do_trap
- int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap
- int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap
- int_hand INT_IDN_REFILL, IDN_REFILL, bad_intr
- int_hand INT_UDN_REFILL, UDN_REFILL, bad_intr
- int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
- int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
- int_hand INT_SWINT_3, SWINT_3, do_trap
- int_hand INT_SWINT_2, SWINT_2, do_trap
- int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
- int_hand INT_SWINT_0, SWINT_0, do_trap
- int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
- int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
- int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
- int_hand INT_DMATLB_MISS, DMATLB_MISS, do_page_fault
- int_hand INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault
- int_hand INT_SNITLB_MISS, SNITLB_MISS, do_page_fault
- int_hand INT_SN_NOTIFY, SN_NOTIFY, bad_intr
- int_hand INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap
- int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
- int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
- int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
- int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
- int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr
- int_hand INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr
- int_hand INT_IDN_CA, IDN_CA, bad_intr
- int_hand INT_UDN_CA, UDN_CA, bad_intr
- int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr
- int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr
- int_hand INT_PERF_COUNT, PERF_COUNT, \
- handle_perf_interrupt, handle_nmi
- int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
-#if CONFIG_KERNEL_PL == 2
- dc_dispatch INT_INTCTRL_2, INTCTRL_2
- int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
-#else
- int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
- dc_dispatch INT_INTCTRL_1, INTCTRL_1
-#endif
- int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
- int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
- hv_message_intr
- int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
- tile_dev_intr
- int_hand INT_I_ASID, I_ASID, bad_intr
- int_hand INT_D_ASID, D_ASID, bad_intr
- int_hand INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
- do_page_fault
- int_hand INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
- do_page_fault
- int_hand INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
- do_page_fault
- int_hand INT_SN_CPL, SN_CPL, bad_intr
- int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
- int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
- handle_perf_interrupt, handle_nmi
-
- /* Synthetic interrupt delivered only by the simulator */
- int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
deleted file mode 100644
index 3b51bdf37d11..000000000000
--- a/arch/tile/kernel/intvec_64.S
+++ /dev/null
@@ -1,1564 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Linux interrupt vectors.
- */
-
-#include <linux/linkage.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/init.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/irqflags.h>
-#include <asm/asm-offsets.h>
-#include <asm/types.h>
-#include <asm/traps.h>
-#include <asm/signal.h>
-#include <hv/hypervisor.h>
-#include <arch/abi.h>
-#include <arch/interrupts.h>
-#include <arch/spr_def.h>
-
-#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
-
-#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
-
-#if CONFIG_KERNEL_PL == 1 || CONFIG_KERNEL_PL == 2
-/*
- * Set "result" non-zero if ex1 holds the PL of the kernel
- * (with or without ICS being set). Note this works only
- * because we never find the PL at level 3.
- */
-# define IS_KERNEL_EX1(result, ex1) andi result, ex1, CONFIG_KERNEL_PL
-#else
-# error Recode IS_KERNEL_EX1 for CONFIG_KERNEL_PL
-#endif
-
- .macro push_reg reg, ptr=sp, delta=-8
- {
- st \ptr, \reg
- addli \ptr, \ptr, \delta
- }
- .endm
-
- .macro pop_reg reg, ptr=sp, delta=8
- {
- ld \reg, \ptr
- addli \ptr, \ptr, \delta
- }
- .endm
-
- .macro pop_reg_zero reg, zreg, ptr=sp, delta=8
- {
- move \zreg, zero
- ld \reg, \ptr
- addi \ptr, \ptr, \delta
- }
- .endm
-
- .macro push_extra_callee_saves reg
- PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
- push_reg r51, \reg
- push_reg r50, \reg
- push_reg r49, \reg
- push_reg r48, \reg
- push_reg r47, \reg
- push_reg r46, \reg
- push_reg r45, \reg
- push_reg r44, \reg
- push_reg r43, \reg
- push_reg r42, \reg
- push_reg r41, \reg
- push_reg r40, \reg
- push_reg r39, \reg
- push_reg r38, \reg
- push_reg r37, \reg
- push_reg r36, \reg
- push_reg r35, \reg
- push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
- .endm
-
- .macro panic str
- .pushsection .rodata, "a"
-1:
- .asciz "\str"
- .popsection
- {
- moveli r0, hw2_last(1b)
- }
- {
- shl16insli r0, r0, hw1(1b)
- }
- {
- shl16insli r0, r0, hw0(1b)
- jal panic
- }
- .endm
-
- /*
- * Unalign data exception fast handling: In order to handle
- * unaligned data access, a fast JIT version is generated and stored
- * in a specific area in user space. We first need to do a quick poke
- * to see if the JIT is available. We use certain bits in the fault
- * PC (3 to 9 is used for 16KB page size) as index to address the JIT
- * code area. The first 64bit word is the fault PC, and the 2nd one is
- * the fault bundle itself. If these 2 words both match, then we
- * directly "iret" to JIT code. If not, a slow path is invoked to
- * generate new JIT code. Note: the current JIT code WILL be
- * overwritten if it existed. So, ideally we can handle 128 unalign
- * fixups via JIT. For lookup efficiency and to effectively support
- * tight loops with multiple unaligned reference, a simple
- * direct-mapped cache is used.
- *
- * SPR_EX_CONTEXT_K_0 is modified to return to JIT code.
- * SPR_EX_CONTEXT_K_1 has ICS set.
- * SPR_EX_CONTEXT_0_0 is setup to user program's next PC.
- * SPR_EX_CONTEXT_0_1 = 0.
- */
- .macro int_hand_unalign_fast vecnum, vecname
- .org (\vecnum << 8)
-intvec_\vecname:
- /* Put r3 in SPR_SYSTEM_SAVE_K_1. */
- mtspr SPR_SYSTEM_SAVE_K_1, r3
-
- mfspr r3, SPR_EX_CONTEXT_K_1
- /*
- * Check whether the exception came from user space without ICS set.
- * If not, just go directly to the slow path.
- */
- bnez r3, hand_unalign_slow_nonuser
-
- mfspr r3, SPR_SYSTEM_SAVE_K_0
-
- /* Get &thread_info->unalign_jit_tmp[0] in r3. */
- bfexts r3, r3, 0, CPU_SHIFT-1
- mm r3, zero, LOG2_THREAD_SIZE, 63
- addli r3, r3, THREAD_INFO_UNALIGN_JIT_TMP_OFFSET
-
- /*
- * Save r0, r1, r2 into thread_info array r3 points to
- * from low to high memory in order.
- */
- st_add r3, r0, 8
- st_add r3, r1, 8
- {
- st_add r3, r2, 8
- andi r2, sp, 7
- }
-
- /* Save stored r3 value so we can revert it on a page fault. */
- mfspr r1, SPR_SYSTEM_SAVE_K_1
- st r3, r1
-
- {
- /* Generate a SIGBUS if sp is not 8-byte aligned. */
- bnez r2, hand_unalign_slow_badsp
- }
-
- /*
- * Get the thread_info in r0; load r1 with pc. Set the low bit of sp
- * as an indicator to the page fault code in case we fault.
- */
- {
- ori sp, sp, 1
- mfspr r1, SPR_EX_CONTEXT_K_0
- }
-
- /* Add the jit_info offset in thread_info; extract r1 [3:9] into r2. */
- {
- addli r0, r3, THREAD_INFO_UNALIGN_JIT_BASE_OFFSET - \
- (THREAD_INFO_UNALIGN_JIT_TMP_OFFSET + (3 * 8))
- bfextu r2, r1, 3, (2 + PAGE_SHIFT - UNALIGN_JIT_SHIFT)
- }
-
- /* Load the jit_info; multiply r2 by 128. */
- {
- ld r0, r0
- shli r2, r2, UNALIGN_JIT_SHIFT
- }
-
- /*
- * If r0 is NULL, the JIT page is not mapped, so go to slow path;
- * add offset r2 to r0 at the same time.
- */
- {
- beqz r0, hand_unalign_slow
- add r2, r0, r2
- }
-
- /*
- * We are loading from userspace (both the JIT info PC and
- * instruction word, and the instruction word we executed)
- * and since either could fault while holding the interrupt
- * critical section, we must tag this region and check it in
- * do_page_fault() to handle it properly.
- */
-ENTRY(__start_unalign_asm_code)
-
- /* Load first word of JIT in r0 and increment r2 by 8. */
- ld_add r0, r2, 8
-
- /*
- * Compare the PC with the 1st word in JIT; load the fault bundle
- * into r1.
- */
- {
- cmpeq r0, r0, r1
- ld r1, r1
- }
-
- /* Go to slow path if PC doesn't match. */
- beqz r0, hand_unalign_slow
-
- /*
- * Load the 2nd word of JIT, which is supposed to be the fault
- * bundle for a cache hit. Increment r2; after this bundle r2 will
- * point to the potential start of the JIT code we want to run.
- */
- ld_add r0, r2, 8
-
- /* No further accesses to userspace are done after this point. */
-ENTRY(__end_unalign_asm_code)
-
- /* Compare the real bundle with what is saved in the JIT area. */
- {
- cmpeq r0, r1, r0
- mtspr SPR_EX_CONTEXT_0_1, zero
- }
-
- /* Go to slow path if the fault bundle does not match. */
- beqz r0, hand_unalign_slow
-
- /*
- * A cache hit is found.
- * r2 points to start of JIT code (3rd word).
- * r0 is the fault pc.
- * r1 is the fault bundle.
- * Reset the low bit of sp.
- */
- {
- mfspr r0, SPR_EX_CONTEXT_K_0
- andi sp, sp, ~1
- }
-
- /* Write r2 into EX_CONTEXT_K_0 and increment PC. */
- {
- mtspr SPR_EX_CONTEXT_K_0, r2
- addi r0, r0, 8
- }
-
- /*
- * Set ICS on kernel EX_CONTEXT_K_1 in order to "iret" to
- * user with ICS set. This way, if the JIT fixup causes another
- * unalign exception (which shouldn't be possible) the user
- * process will be terminated with SIGBUS. Also, our fixup will
- * run without interleaving with external interrupts.
- * Each fixup is at most 14 bundles, so it won't hold ICS for long.
- */
- {
- movei r1, PL_ICS_EX1(USER_PL, 1)
- mtspr SPR_EX_CONTEXT_0_0, r0
- }
-
- {
- mtspr SPR_EX_CONTEXT_K_1, r1
- addi r3, r3, -(3 * 8)
- }
-
- /* Restore r0..r3. */
- ld_add r0, r3, 8
- ld_add r1, r3, 8
- ld_add r2, r3, 8
- ld r3, r3
-
- iret
- ENDPROC(intvec_\vecname)
- .endm
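
The structure the fast path probes can be modeled as a direct-mapped cache of 128-byte slots in user space, indexed by low PC bits and tagged by fault PC and fault bundle; the slot size follows from UNALIGN_JIT_SHIFT (tag, bundle, then at most 14 fixup bundles, per the comments above). The struct and names are illustrative; the assembly addresses the area through THREAD_INFO_UNALIGN_JIT_BASE_OFFSET.

    #include <stdint.h>
    #include <stddef.h>

    struct jit_slot {                           /* 1 << UNALIGN_JIT_SHIFT bytes */
            uint64_t pc;                        /* tag word 1: fault PC */
            uint64_t bundle;                    /* tag word 2: fault bundle */
            uint64_t code[14];                  /* JIT fixup, branch back at end */
    };

    static struct jit_slot *jit_lookup(struct jit_slot *base,
                                       unsigned int nr_slots,
                                       uint64_t pc, uint64_t bundle)
    {
            struct jit_slot *s = &base[(pc >> 3) & (nr_slots - 1)];
            return (s->pc == pc && s->bundle == bundle) ? s : NULL;
    }
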
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- .pushsection .text.intvec_feedback,"ax"
-intvec_feedback:
- .popsection
-#endif
-
- /*
- * Default interrupt handler.
- *
- * vecnum is where we'll put this code.
- * c_routine is the C routine we'll call.
- *
- * The C routine is passed two arguments:
- * - A pointer to the pt_regs state.
- * - The interrupt vector number.
- *
- * The "processing" argument specifies the code for processing
- * the interrupt. Defaults to "handle_interrupt".
- */
- .macro __int_hand vecnum, vecname, c_routine, processing=handle_interrupt
-intvec_\vecname:
- /* Temporarily save a register so we have somewhere to work. */
-
- mtspr SPR_SYSTEM_SAVE_K_1, r0
- mfspr r0, SPR_EX_CONTEXT_K_1
-
- /*
- * The unalign data fastpath code sets the low bit in sp to
- * force us to reset it here on fault.
- */
- {
- blbs sp, 2f
- IS_KERNEL_EX1(r0, r0)
- }
-
- .ifc \vecnum, INT_DOUBLE_FAULT
- /*
- * For double-faults from user-space, fall through to the normal
- * register save and stack setup path. Otherwise, it's the
- * hypervisor giving us one last chance to dump diagnostics, and we
- * branch to the kernel_double_fault routine to do so.
- */
- beqz r0, 1f
- j _kernel_double_fault
-1:
- .else
- /*
- * If we're coming from user-space, then set sp to the top of
- * the kernel stack. Otherwise, assume sp is already valid.
- */
- {
- bnez r0, 0f
- move r0, sp
- }
- .endif
-
- .ifc \c_routine, do_page_fault
- /*
- * The page_fault handler may be downcalled directly by the
- * hypervisor even when Linux is running and has ICS set.
- *
- * In this case the contents of EX_CONTEXT_K_1 reflect the
- * previous fault and can't be relied on to choose whether or
- * not to reinitialize the stack pointer. So we add a test
- * to see whether SYSTEM_SAVE_K_2 has the high bit set,
- * and if so we don't reinitialize sp, since we must be coming
- * from Linux. (In fact the precise case is !(val & ~1),
- * but any Linux PC has to have the high bit set.)
- *
- * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
- * any path that turns into a downcall to one of our TLB handlers.
- *
- * FIXME: if we end up never using this path, perhaps we should
- * prevent the hypervisor from generating downcalls in this case.
- * The advantage of getting a downcall is we can panic in Linux.
- */
- mfspr r0, SPR_SYSTEM_SAVE_K_2
- {
- bltz r0, 0f /* high bit set in SYSTEM_SAVE_K_2 means it holds a PC */
- move r0, sp
- }
- .endif
-
-2:
- /*
- * SYSTEM_SAVE_K_0 holds the cpu number in the high bits, and
- * the current stack top in the lower bits. So we recover
- * our starting stack value by sign-extending the low bits, then
- * point sp at the top aligned address on the actual stack page.
- */
- mfspr r0, SPR_SYSTEM_SAVE_K_0
- bfexts r0, r0, 0, CPU_SHIFT-1
-
-0:
- /*
- * Align the stack mod 64 so we can properly predict what
- * cache lines we need to write-hint to reduce memory fetch
- * latency as we enter the kernel. The layout of memory is
- * as follows, with cache line 0 at the lowest VA, and cache
- * line 8 just below the r0 value this "andi" computes.
- * Note that we never write to cache line 8, and we skip
- * cache lines 1-3 for syscalls.
- *
- * cache line 8: ptregs padding (two words)
- * cache line 7: sp, lr, pc, ex1, faultnum, orig_r0, flags, cmpexch
- * cache line 6: r46...r53 (tp)
- * cache line 5: r38...r45
- * cache line 4: r30...r37
- * cache line 3: r22...r29
- * cache line 2: r14...r21
- * cache line 1: r6...r13
- * cache line 0: 2 x frame, r0..r5
- */
-#if STACK_TOP_DELTA != 64
-#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
-#endif
- andi r0, r0, -64
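
In C, the recovery of sp from SYSTEM_SAVE_K_0: sign-extend the low CPU_SHIFT bits ("bfexts") and round down to the 64-byte STACK_TOP_DELTA ("andi ... -64"). The CPU_SHIFT value below is illustrative.

    #include <stdint.h>

    #define CPU_SHIFT_SKETCH 48                 /* illustrative value */

    static uint64_t stack_top_from_spr(uint64_t ssk0)
    {
            /* bfexts r0, r0, 0, CPU_SHIFT-1 */
            int64_t stack = (int64_t)(ssk0 << (64 - CPU_SHIFT_SKETCH))
                            >> (64 - CPU_SHIFT_SKETCH);
            /* andi r0, r0, -64 */
            return (uint64_t)stack & ~(uint64_t)63;
    }
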
-
- /*
- * Push the first four registers on the stack, so that we can set
- * them to vector-unique values before we jump to the common code.
- *
- * Registers are pushed on the stack as a struct pt_regs,
- * with the sp initially just above the struct, and when we're
- * done, sp points to the base of the struct, minus
- * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
- *
- * This routine saves just the first four registers, plus the
- * stack context so we can do proper backtracing right away,
- * and defers to handle_interrupt to save the rest.
- * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
- * and needs sp set to its final location at the bottom of
- * the stack frame.
- */
- addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
- wh64 r0 /* cache line 7 */
- {
- st r0, lr
- addli r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
- }
- {
- st r0, sp
- addli sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
- }
- wh64 sp /* cache line 6 */
- {
- st sp, r52
- addli sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
- }
- wh64 sp /* cache line 0 */
- {
- st sp, r1
- addli sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
- }
- {
- st sp, r2
- addli sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
- }
- {
- st sp, r3
- addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
- }
- mfspr r0, SPR_EX_CONTEXT_K_0
- .ifc \processing,handle_syscall
- /*
- * Bump the saved PC by one bundle so that when we return, we won't
- * execute the same swint instruction again. We need to do this while
- * we're in the critical section.
- */
- addi r0, r0, 8
- .endif
- {
- st sp, r0
- addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
- }
- mfspr r0, SPR_EX_CONTEXT_K_1
- {
- st sp, r0
- addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
- /*
- * Use r0 for syscalls so it's a temporary; use r1 for interrupts
- * so that it gets passed through unchanged to the handler routine.
- * Note that the .if conditional confusingly spans bundles.
- */
- .ifc \processing,handle_syscall
- movei r0, \vecnum
- }
- {
- st sp, r0
- .else
- movei r1, \vecnum
- }
- {
- st sp, r1
- .endif
- addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
- }
- mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */
- {
- st sp, r0
- addi sp, sp, -PTREGS_OFFSET_REG(0) - 8
- }
- {
- st sp, zero /* write zero into "Next SP" frame pointer */
- addi sp, sp, -8 /* leave SP pointing at bottom of frame */
- }
- .ifc \processing,handle_syscall
- j handle_syscall
- .else
- /* Capture per-interrupt SPR context to registers. */
- .ifc \c_routine, do_page_fault
- mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */
- mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
- .else
- .ifc \vecnum, INT_ILL_TRANS
- mfspr r2, ILL_VA_PC
- .else
- .ifc \vecnum, INT_DOUBLE_FAULT
- mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
- .else
- .ifc \c_routine, do_trap
- mfspr r2, GPV_REASON
- .else
- .ifc \vecnum, INT_PERF_COUNT
- mfspr r2, PERF_COUNT_STS
- .else
- .ifc \vecnum, INT_AUX_PERF_COUNT
- mfspr r2, AUX_PERF_COUNT_STS
- .endif
- .ifc \c_routine, do_nmi
- mfspr r2, SPR_SYSTEM_SAVE_K_2 /* nmi type */
- .else
- .endif
- .endif
- .endif
- .endif
- .endif
- .endif
- /* Put function pointer in r0 */
- moveli r0, hw2_last(\c_routine)
- shl16insli r0, r0, hw1(\c_routine)
- {
- shl16insli r0, r0, hw0(\c_routine)
- j \processing
- }
- .endif
- ENDPROC(intvec_\vecname)
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- .pushsection .text.intvec_feedback,"ax"
- .org (\vecnum << 5)
- FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
- jrp lr
- .popsection
-#endif
-
- .endm
-
-
- /*
- * Save the rest of the registers that we didn't save in the actual
- * vector itself. We can't use r0-r10 inclusive here.
- */
- .macro finish_interrupt_save, function
-
- /* If it's a syscall, save a proper orig_r0, otherwise just zero. */
- PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
- {
- .ifc \function,handle_syscall
- st r52, r0
- .else
- st r52, zero
- .endif
- PTREGS_PTR(r52, PTREGS_OFFSET_TP)
- }
- st r52, tp
- {
- mfspr tp, CMPEXCH_VALUE
- PTREGS_PTR(r52, PTREGS_OFFSET_CMPEXCH)
- }
-
- /*
- * For ordinary syscalls, we save neither caller- nor callee-
- * save registers, since the syscall invoker doesn't expect the
- * caller-saves to be saved, and the called kernel functions will
- * take care of saving the callee-saves for us.
- *
- * For interrupts we save just the caller-save registers. Saving
- * them is required (since the "caller" can't save them). Again,
- * the called kernel functions will restore the callee-save
- * registers for us appropriately.
- *
- * On return, we normally restore nothing special for syscalls,
- * and just the caller-save registers for interrupts.
- *
- * However, there are some important caveats to all this:
- *
- * - We always save a few callee-save registers to give us
- * some scratchpad registers to carry across function calls.
- *
- * - fork/vfork/etc require us to save all the callee-save
- * registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
- *
- * - We always save r0..r5 and r10 for syscalls, since we need
- * to reload them a bit later for the actual kernel call, and
- * since we might need them for -ERESTARTNOINTR, etc.
- *
- * - Before invoking a signal handler, we save the unsaved
- * callee-save registers so they are visible to the
- * signal handler or any ptracer.
- *
- * - If the unsaved callee-save registers are modified, we set
- * a bit in pt_regs so we know to reload them from pt_regs
- * and not just rely on the kernel function unwinding.
- * (Done for ptrace register writes and SA_SIGINFO handler.)
- */
- {
- st r52, tp
- PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
- }
- wh64 r52 /* cache line 4 */
- push_reg r33, r52
- push_reg r32, r52
- push_reg r31, r52
- .ifc \function,handle_syscall
- push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
- push_reg TREG_SYSCALL_NR_NAME, r52, \
- PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
- .else
-
- push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
- wh64 r52 /* cache line 3 */
- push_reg r29, r52
- push_reg r28, r52
- push_reg r27, r52
- push_reg r26, r52
- push_reg r25, r52
- push_reg r24, r52
- push_reg r23, r52
- push_reg r22, r52
- wh64 r52 /* cache line 2 */
- push_reg r21, r52
- push_reg r20, r52
- push_reg r19, r52
- push_reg r18, r52
- push_reg r17, r52
- push_reg r16, r52
- push_reg r15, r52
- push_reg r14, r52
- wh64 r52 /* cache line 1 */
- push_reg r13, r52
- push_reg r12, r52
- push_reg r11, r52
- push_reg r10, r52
- push_reg r9, r52
- push_reg r8, r52
- push_reg r7, r52
- push_reg r6, r52
-
- .endif
-
- push_reg r5, r52
- st r52, r4
-
- /*
- * If we will be returning to the kernel, we will need to
- * reset the interrupt masks to the state they had before.
- * Set DISABLE_IRQ in flags iff we came from kernel pl with
- * irqs disabled.
- */
- mfspr r32, SPR_EX_CONTEXT_K_1
- {
- IS_KERNEL_EX1(r32, r32)
- PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
- }
- beqzt r32, 1f /* zero if from user space */
- IRQS_DISABLED(r32) /* zero if irqs enabled */
-#if PT_FLAGS_DISABLE_IRQ != 1
-# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
-#endif
-1:
- .ifnc \function,handle_syscall
- /* Record the fact that we saved the caller-save registers above. */
- ori r32, r32, PT_FLAGS_CALLER_SAVES
- .endif
- st r21, r32
-
- /*
- * We've captured enough state onto the stack (including in
- * particular our EX_CONTEXT state) that we can now release
- * the interrupt critical section and replace it with our
- * standard "interrupts disabled" mask value. This allows
- * synchronous interrupts (and profile interrupts) to punch
- * through from this point onwards.
- *
- * It's important that no code before this point touch memory
- * other than our own stack (to keep the invariant that this
- * is all that gets touched under ICS), and that no code after
- * this point reference any interrupt-specific SPR, in particular
- * the EX_CONTEXT_K_ values.
- */
- .ifc \function,handle_nmi
- IRQ_DISABLE_ALL(r20)
- .else
- IRQ_DISABLE(r20, r21)
- .endif
- mtspr INTERRUPT_CRITICAL_SECTION, zero
-
- /* Load tp with our per-cpu offset. */
-#ifdef CONFIG_SMP
- {
- mfspr r20, SPR_SYSTEM_SAVE_K_0
- moveli r21, hw2_last(__per_cpu_offset)
- }
- {
- shl16insli r21, r21, hw1(__per_cpu_offset)
- bfextu r20, r20, CPU_SHIFT, 63
- }
- shl16insli r21, r21, hw0(__per_cpu_offset)
- shl3add r20, r20, r21
- ld tp, r20
-#else
- move tp, zero
-#endif
-
-#ifdef __COLLECT_LINKER_FEEDBACK__
- /*
- * Notify the feedback routines that we were in the
- * appropriate fixed interrupt vector area. Note that we
- * still have ICS set at this point, so we can't invoke any
- * atomic operations or we will panic. The feedback
- * routines internally preserve r0..r10, and r30 and above.
- */
- .ifnc \function,handle_syscall
- shli r20, r1, 5
- .else
- moveli r20, INT_SWINT_1 << 5
- .endif
- moveli r21, hw2_last(intvec_feedback)
- shl16insli r21, r21, hw1(intvec_feedback)
- shl16insli r21, r21, hw0(intvec_feedback)
- add r20, r20, r21
- jalr r20
-
- /* And now notify the feedback routines that we are here. */
- FEEDBACK_ENTER(\function)
-#endif
-
- /*
- * Prepare the first 256 stack bytes to be rapidly accessible
- * without having to fetch the background data.
- */
- addi r52, sp, -64
- {
- wh64 r52
- addi r52, r52, -64
- }
- {
- wh64 r52
- addi r52, r52, -64
- }
- {
- wh64 r52
- addi r52, r52, -64
- }
- wh64 r52
-
-#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
- .ifnc \function,handle_nmi
- /*
- * We finally have enough state set up to notify the irq
- * tracing code that irqs were disabled on entry to the handler.
- * The TRACE_IRQS_OFF call clobbers registers r0-r29.
- * For syscalls, we already have the register state saved away
- * on the stack, so we don't bother to do any register saves here,
- * and later we pop the registers back off the kernel stack.
- * For interrupt handlers, save r0-r3 in callee-saved registers.
- */
- .ifnc \function,handle_syscall
- { move r30, r0; move r31, r1 }
- { move r32, r2; move r33, r3 }
- .endif
- TRACE_IRQS_OFF
-#ifdef CONFIG_CONTEXT_TRACKING
- jal context_tracking_user_exit
-#endif
- .ifnc \function,handle_syscall
- { move r0, r30; move r1, r31 }
- { move r2, r32; move r3, r33 }
- .endif
- .endif
-#endif
-
- .endm
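
As a rough C-level sketch of the SMP tp setup inside finish_interrupt_save above (assuming the cpu number occupies bits CPU_SHIFT and up of SPR_SYSTEM_SAVE_K_0; the real sequence must run before the stack or tp is usable, so this is illustrative only):

	/* Sketch: compute the per-cpu base the mfspr/bfextu/shl3add/ld
	 * sequence loads into tp.
	 */
	static unsigned long compute_tp(unsigned long sys_save_k_0)
	{
		unsigned long cpu = sys_save_k_0 >> CPU_SHIFT;	/* bfextu */
		return __per_cpu_offset[cpu];			/* shl3add + ld */
	}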
-
- /*
- * Redispatch a downcall.
- */
- .macro dc_dispatch vecnum, vecname
- .org (\vecnum << 8)
-intvec_\vecname:
- j _hv_downcall_dispatch
- ENDPROC(intvec_\vecname)
- .endm
-
- /*
- * Common code for most interrupts. The C function we're eventually
- * going to is in r0, and the faultnum is in r1; the original
- * values for those registers are on the stack.
- */
- .pushsection .text.handle_interrupt,"ax"
-handle_interrupt:
- finish_interrupt_save handle_interrupt
-
- /* Jump to the C routine; it should enable irqs as soon as possible. */
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_interrupt)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(handle_interrupt)
-
-/*
- * This routine takes a boolean in r30 indicating if this is an NMI.
- * If so, we also expect a boolean in r31 indicating whether to
- * re-enable the oprofile interrupts.
- *
- * Note that .Lresume_userspace is jumped to directly in several
- * places, and we need to make sure r30 is set correctly in those
- * callers as well.
- */
-STD_ENTRY(interrupt_return)
- /* If we're resuming to kernel space, don't check thread flags. */
- {
- bnez r30, .Lrestore_all /* NMIs don't special-case user-space */
- PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
- }
- ld r29, r29
- IS_KERNEL_EX1(r29, r29)
- {
- beqzt r29, .Lresume_userspace
- move r29, sp
- }
-
-#ifdef CONFIG_PREEMPT
- /* Returning to kernel space. Check if we need preemption. */
- EXTRACT_THREAD_INFO(r29)
- addli r28, r29, THREAD_INFO_FLAGS_OFFSET
- {
- ld r28, r28
- addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
- }
- {
- andi r28, r28, _TIF_NEED_RESCHED
- ld4s r29, r29
- }
- beqzt r28, 1f
- bnez r29, 1f
- /* Disable interrupts explicitly for preemption. */
- IRQ_DISABLE(r20,r21)
- TRACE_IRQS_OFF
- jal preempt_schedule_irq
- FEEDBACK_REENTER(interrupt_return)
-1:
-#endif
-
- /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
- {
- moveli r27, hw2_last(_cpu_idle_nap)
- PTREGS_PTR(r29, PTREGS_OFFSET_PC)
- }
- {
- ld r28, r29
- shl16insli r27, r27, hw1(_cpu_idle_nap)
- }
- {
- shl16insli r27, r27, hw0(_cpu_idle_nap)
- }
- {
- cmpeq r27, r27, r28
- }
- {
- blbc r27, .Lrestore_all
- addi r28, r28, 8
- }
- st r29, r28
- j .Lrestore_all
-
-.Lresume_userspace:
- FEEDBACK_REENTER(interrupt_return)
-
- /*
- * Disable interrupts so as to make sure we don't
- * miss an interrupt that sets any of the thread flags (like
- * need_resched or sigpending) between sampling and the iret.
- * Routines like schedule() or do_signal() may re-enable
- * interrupts before returning.
- */
- IRQ_DISABLE(r20, r21)
- TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
-
- /*
- * See if there are any work items (including single-shot items)
- * to do. If so, save the callee-save registers to pt_regs
- * and then dispatch to C code.
- */
- move r21, sp
- EXTRACT_THREAD_INFO(r21)
- {
- addi r22, r21, THREAD_INFO_FLAGS_OFFSET
- moveli r20, hw1_last(_TIF_ALLWORK_MASK)
- }
- {
- ld r22, r22
- shl16insli r20, r20, hw0(_TIF_ALLWORK_MASK)
- }
- and r1, r22, r20
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- beqzt r1, .Lrestore_all
- }
- push_extra_callee_saves r0
- jal prepare_exit_to_usermode
-
- /*
- * In the NMI case we omit the call to single_process_check_nohz,
- * which normally checks to see if we should start or stop the
- * scheduler tick, because we can't call arbitrary Linux code from
- * an NMI context. We always call the homecache TLB deferral code
- * to re-trigger the deferral mechanism.
- *
- * The other chunk of responsibility this code has is to reset the
- * interrupt masks appropriately to re-enable irqs and NMIs. We have
- * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
- * lockdep-type stuff, but we can't set ICS until afterwards, since
- * ICS can only be used in very tight chunks of code to avoid
- * tripping over various assertions that it is off.
- */
-.Lrestore_all:
- PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
- {
- ld r0, r0
- PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
- }
- {
- IS_KERNEL_EX1(r0, r0)
- ld r32, r32
- }
- bnez r0, 1f
- j 2f
-#if PT_FLAGS_DISABLE_IRQ != 1
-# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use blbct below
-#endif
-1: blbct r32, 2f
- IRQ_DISABLE(r20,r21)
- TRACE_IRQS_OFF
- movei r0, 1
- mtspr INTERRUPT_CRITICAL_SECTION, r0
- beqzt r30, .Lrestore_regs
- j 3f
-2: TRACE_IRQS_ON
- IRQ_ENABLE_LOAD(r20, r21)
- movei r0, 1
- mtspr INTERRUPT_CRITICAL_SECTION, r0
- IRQ_ENABLE_APPLY(r20, r21)
- beqzt r30, .Lrestore_regs
-3:
-
-#if INT_PERF_COUNT + 1 != INT_AUX_PERF_COUNT
-# error Bad interrupt assumption
-#endif
- {
- movei r0, 3 /* two adjacent bits for the PERF_COUNT mask */
- beqz r31, .Lrestore_regs
- }
- shli r0, r0, INT_PERF_COUNT
- mtspr SPR_INTERRUPT_MASK_RESET_K, r0
-
- /*
- * We now commit to returning from this interrupt, since we will be
- * doing things like setting EX_CONTEXT SPRs and unwinding the stack
- * frame. No calls should be made to any other code after this point.
- * This code should only be entered with ICS set.
- * r32 must still be set to ptregs.flags.
- * We launch loads to each cache line separately first, so we can
- * get some parallelism out of the memory subsystem.
- * We start zeroing caller-saved registers throughout, since
- * that will save some cycles if this turns out to be a syscall.
- */
-.Lrestore_regs:
-
- /*
- * Rotate so we have one high bit and one low bit to test.
- * - low bit says whether to restore all the callee-saved registers,
- * or just r30-r33, and r52 up.
- * - high bit (i.e. sign bit) says whether to restore all the
- * caller-saved registers, or just r0.
- */
-#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
-# error Rotate trick does not work :-)
-#endif
- {
- rotli r20, r32, 62
- PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
- }
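
In C terms, the rotate above does the following (a sketch; "flags" stands for the pt_regs flags word loaded into r32):

	/* Rotating left by 62 bits is rotating right by 2, which parks
	 * PT_FLAGS_RESTORE_REGS (bit 2) in bit 0 and
	 * PT_FLAGS_CALLER_SAVES (bit 1) in the sign bit, so each can be
	 * tested with a single-bit branch (blbs / bltzt).
	 */
	unsigned long t = (flags >> 2) | (flags << 62);
	int restore_callees = t & 1;		/* PT_FLAGS_RESTORE_REGS */
	int restore_callers = (long)t < 0;	/* PT_FLAGS_CALLER_SAVES */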
-
- /*
- * Load cache lines 0, 4, 6 and 7, in that order, then use
- * the last loaded value, which makes it likely that the other
- * cache lines have also loaded, at which point we should be
- * able to safely read all the remaining words on those cache
- * lines without waiting for the memory subsystem.
- */
- pop_reg r0, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
- pop_reg r30, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_REG(30)
- pop_reg_zero r52, r3, sp, PTREGS_OFFSET_CMPEXCH - PTREGS_OFFSET_REG(52)
- pop_reg_zero r21, r27, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_CMPEXCH
- pop_reg_zero lr, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_EX1
- {
- mtspr CMPEXCH_VALUE, r21
- move r4, zero
- }
- pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC
- {
- mtspr SPR_EX_CONTEXT_K_1, lr
- IS_KERNEL_EX1(lr, lr)
- }
- {
- mtspr SPR_EX_CONTEXT_K_0, r21
- move r5, zero
- }
-
- /* Restore callee-saveds that we actually use. */
- pop_reg_zero r31, r6
- pop_reg_zero r32, r7
- pop_reg_zero r33, r8, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)
-
- /*
- * If we modified other callee-saveds, restore them now.
- * This is rare, but could be via ptrace or signal handler.
- */
- {
- move r9, zero
- blbs r20, .Lrestore_callees
- }
-.Lcontinue_restore_regs:
-
- /* Check if we're returning from a syscall. */
- {
- move r10, zero
- bltzt r20, 1f /* no, so go restore caller-save registers */
- }
-
- /*
- * Check if we're returning to userspace.
- * Note that if we're not, we don't worry about zeroing everything.
- */
- {
- addli sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
- bnez lr, .Lkernel_return
- }
-
- /*
- * On return from syscall, we've restored r0 from pt_regs, but we
- * clear the remainder of the caller-saved registers. We could
- * restore the syscall arguments, but there's not much point;
- * clearing them instead keeps user programs from relying on
- * caller-saved registers across a syscall, and avoids leaking
- * kernel pointers into userspace.
- */
- pop_reg_zero lr, r11, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
- pop_reg_zero tp, r12, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
- {
- ld sp, sp
- move r13, zero
- move r14, zero
- }
- { move r15, zero; move r16, zero }
- { move r17, zero; move r18, zero }
- { move r19, zero; move r20, zero }
- { move r21, zero; move r22, zero }
- { move r23, zero; move r24, zero }
- { move r25, zero; move r26, zero }
-
- /* Set r1 to errno if we are returning an error, otherwise zero. */
- {
- moveli r29, 4096
- sub r1, zero, r0
- }
- {
- move r28, zero
- cmpltu r29, r1, r29
- }
- {
- mnz r1, r29, r1
- move r29, zero
- }
- iret
-
- /*
- * Not a syscall, so restore caller-saved registers.
- * First kick off loads for cache lines 1-3, which we're touching
- * for the first time here.
- */
- .align 64
-1: pop_reg r29, sp, PTREGS_OFFSET_REG(21) - PTREGS_OFFSET_REG(29)
- pop_reg r21, sp, PTREGS_OFFSET_REG(13) - PTREGS_OFFSET_REG(21)
- pop_reg r13, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(13)
- pop_reg r1
- pop_reg r2
- pop_reg r3
- pop_reg r4
- pop_reg r5
- pop_reg r6
- pop_reg r7
- pop_reg r8
- pop_reg r9
- pop_reg r10
- pop_reg r11
- pop_reg r12, sp, 16
- /* r13 already restored above */
- pop_reg r14
- pop_reg r15
- pop_reg r16
- pop_reg r17
- pop_reg r18
- pop_reg r19
- pop_reg r20, sp, 16
- /* r21 already restored above */
- pop_reg r22
- pop_reg r23
- pop_reg r24
- pop_reg r25
- pop_reg r26
- pop_reg r27
- pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
- /* r29 already restored above */
- bnez lr, .Lkernel_return
- pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
- pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
- ld sp, sp
- iret
-
- /*
- * We can't restore tp when in kernel mode, since a thread might
- * have migrated from another cpu and brought a stale tp value.
- */
-.Lkernel_return:
- pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
- ld sp, sp
- iret
-
- /* Restore callee-saved registers from r34 to r51. */
-.Lrestore_callees:
- addli sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
- pop_reg r34
- pop_reg r35
- pop_reg r36
- pop_reg r37
- pop_reg r38
- pop_reg r39
- pop_reg r40
- pop_reg r41
- pop_reg r42
- pop_reg r43
- pop_reg r44
- pop_reg r45
- pop_reg r46
- pop_reg r47
- pop_reg r48
- pop_reg r49
- pop_reg r50
- pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
- j .Lcontinue_restore_regs
- STD_ENDPROC(interrupt_return)
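
The branch-free errno computation near the end of the syscall return path ("Set r1 to errno...") is dense; a C sketch of what those four instructions compute:

	/* r0 in [-4095, -1] encodes -errno, so r1 gets the positive
	 * errno value; any other return value yields zero.
	 */
	static inline unsigned long errno_from_retval(long r0)
	{
		unsigned long neg = (unsigned long)-r0;	/* sub r1, zero, r0 */
		return neg < 4096 ? neg : 0;		/* cmpltu + mnz */
	}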
-
- /*
- * "NMI" interrupts mask ALL interrupts before calling the
- * handler, and don't check thread flags, etc., on the way
- * back out. In general, the only things we do here for NMIs
- * are register save/restore and dataplane kernel-TLB management.
- * We don't (for example) deal with start/stop of the sched tick.
- */
- .pushsection .text.handle_nmi,"ax"
-handle_nmi:
- finish_interrupt_save handle_nmi
- {
- jalr r0
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- }
- FEEDBACK_REENTER(handle_nmi)
- {
- movei r30, 1
- cmpeq r31, r0, zero
- }
- j interrupt_return
- STD_ENDPROC(handle_nmi)
-
- /*
- * Parallel code for syscalls to handle_interrupt.
- */
- .pushsection .text.handle_syscall,"ax"
-handle_syscall:
- finish_interrupt_save handle_syscall
-
- /* Enable irqs. */
- TRACE_IRQS_ON
- IRQ_ENABLE(r20, r21)
-
- /* Bump the counter for syscalls made on this tile. */
- moveli r20, hw2_last(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
- shl16insli r20, r20, hw1(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
- shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
- add r20, r20, tp
- ld4s r21, r20
- {
- addi r21, r21, 1
- move r31, sp
- }
- {
- st4 r20, r21
- EXTRACT_THREAD_INFO(r31)
- }
-
- /* Trace syscalls, if requested. */
- addi r31, r31, THREAD_INFO_FLAGS_OFFSET
- {
- ld r30, r31
- moveli r32, _TIF_SYSCALL_ENTRY_WORK
- }
- and r30, r30, r32
- {
- addi r30, r31, THREAD_INFO_STATUS_OFFSET - THREAD_INFO_FLAGS_OFFSET
- beqzt r30, .Lrestore_syscall_regs
- }
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_syscall_trace_enter
- }
- FEEDBACK_REENTER(handle_syscall)
- bltz r0, .Lsyscall_sigreturn_skip
-
- /*
- * We always reload our registers from the stack at this
- * point. They might be valid, if we didn't build with
- * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
- * doing syscall tracing, but there are enough cases now that it
- * seems simplest just to do the reload unconditionally.
- */
-.Lrestore_syscall_regs:
- {
- ld r30, r30
- PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
- }
- pop_reg r0, r11
- pop_reg r1, r11
- pop_reg r2, r11
- pop_reg r3, r11
- pop_reg r4, r11
- pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
- {
- ld TREG_SYSCALL_NR_NAME, r11
- moveli r21, __NR_syscalls
- }
-
- /* Ensure that the syscall number is within the legal range. */
- {
- moveli r20, hw2(sys_call_table)
-#ifdef CONFIG_COMPAT
- blbs r30, .Lcompat_syscall
-#endif
- }
- {
- cmpltu r21, TREG_SYSCALL_NR_NAME, r21
- shl16insli r20, r20, hw1(sys_call_table)
- }
- {
- blbc r21, .Linvalid_syscall
- shl16insli r20, r20, hw0(sys_call_table)
- }
-.Lload_syscall_pointer:
- shl3add r20, TREG_SYSCALL_NR_NAME, r20
- ld r20, r20
-
- /* Jump to syscall handler. */
- jalr r20
-.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
-
- /*
- * Write our r0 onto the stack so it gets restored instead
- * of whatever the user had there before.
- * In compat mode, sign-extend r0 before storing it.
- */
- {
- PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
- blbct r30, 1f
- }
- addxi r0, r0, 0
-1: st r29, r0
-
-.Lsyscall_sigreturn_skip:
- FEEDBACK_REENTER(handle_syscall)
-
- /* Do syscall trace again, if requested. */
- {
- ld r30, r31
- moveli r32, _TIF_SYSCALL_EXIT_WORK
- }
- and r0, r30, r32
- {
- andi r0, r30, _TIF_SINGLESTEP
- beqzt r0, 1f
- }
- {
- PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
- jal do_syscall_trace_exit
- }
- FEEDBACK_REENTER(handle_syscall)
- andi r0, r30, _TIF_SINGLESTEP
-
-1: beqzt r0, 2f
-
- /* Single stepping -- notify ptrace. */
- {
- movei r0, SIGTRAP
- jal ptrace_notify
- }
- FEEDBACK_REENTER(handle_syscall)
-
-2: {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
-
-#ifdef CONFIG_COMPAT
-.Lcompat_syscall:
- /*
- * Load the base of the compat syscall table in r20, and
- * range-check the syscall number (duplicated from 64-bit path).
- * Sign-extend all the user's passed arguments to make them consistent.
- * Also save the original "r(n)" values away in "r(11+n)" in
- * case the syscall table entry wants to validate them.
- */
- moveli r20, hw2(compat_sys_call_table)
- {
- cmpltu r21, TREG_SYSCALL_NR_NAME, r21
- shl16insli r20, r20, hw1(compat_sys_call_table)
- }
- {
- blbc r21, .Linvalid_syscall
- shl16insli r20, r20, hw0(compat_sys_call_table)
- }
- { move r11, r0; addxi r0, r0, 0 }
- { move r12, r1; addxi r1, r1, 0 }
- { move r13, r2; addxi r2, r2, 0 }
- { move r14, r3; addxi r3, r3, 0 }
- { move r15, r4; addxi r4, r4, 0 }
- { move r16, r5; addxi r5, r5, 0 }
- j .Lload_syscall_pointer
-#endif
-
-.Linvalid_syscall:
- /* Report an invalid syscall back to the user program */
- {
- PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
- movei r28, -ENOSYS
- }
- st r29, r28
- {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
- STD_ENDPROC(handle_syscall)
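
A simplified C sketch of the bounds check and table dispatch performed by handle_syscall above (types simplified; the real table is an array of untyped pointers, indexed in 8-byte entries by shl3add):

	typedef long (*syscall_fn)(long, long, long, long, long, long);

	static long dispatch(unsigned long nr, long a0, long a1, long a2,
			     long a3, long a4, long a5)
	{
		if (nr >= __NR_syscalls)
			return -ENOSYS;		/* .Linvalid_syscall path */
		return ((syscall_fn)sys_call_table[nr])(a0, a1, a2, a3, a4, a5);
	}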
-
- /* Return the address for oprofile to suppress in backtraces. */
-STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
- lnk r0
- {
- addli r0, r0, .Lhandle_syscall_link - .
- jrp lr
- }
- STD_ENDPROC(handle_syscall_link_address)
-
-STD_ENTRY(ret_from_fork)
- jal sim_notify_fork
- jal schedule_tail
- FEEDBACK_REENTER(ret_from_fork)
- {
- movei r30, 0 /* not an NMI */
- j .Lresume_userspace /* jump into middle of interrupt_return */
- }
- STD_ENDPROC(ret_from_fork)
-
-STD_ENTRY(ret_from_kernel_thread)
- jal sim_notify_fork
- jal schedule_tail
- FEEDBACK_REENTER(ret_from_fork)
- {
- move r0, r31
- jalr r30
- }
- FEEDBACK_REENTER(ret_from_kernel_thread)
- {
- movei r30, 0 /* not an NMI */
- j interrupt_return
- }
- STD_ENDPROC(ret_from_kernel_thread)
-
-/* Various stub interrupt handlers and syscall handlers */
-
-STD_ENTRY_LOCAL(_kernel_double_fault)
- mfspr r1, SPR_EX_CONTEXT_K_0
- move r2, lr
- move r3, sp
- move r4, r52
- addi sp, sp, -C_ABI_SAVE_AREA_SIZE
- j kernel_double_fault
- STD_ENDPROC(_kernel_double_fault)
-
-STD_ENTRY_LOCAL(bad_intr)
- mfspr r2, SPR_EX_CONTEXT_K_0
- panic "Unhandled interrupt %#x: PC %#lx"
- STD_ENDPROC(bad_intr)
-
-/*
- * Special-case sigreturn to not write r0 to the stack on return.
- * This is technically more efficient, but it also avoids difficulties
- * in the 64-bit OS when handling 32-bit compat code, since we must not
- * sign-extend r0 for the sigreturn return-value case.
- */
-#define PTREGS_SYSCALL_SIGRETURN(x, reg) \
- STD_ENTRY(_##x); \
- addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
- { \
- PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
- j x \
- }; \
- STD_ENDPROC(_##x)
-
-PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
-#ifdef CONFIG_COMPAT
-PTREGS_SYSCALL_SIGRETURN(compat_sys_rt_sigreturn, r0)
-#endif
-
-/* Save additional callee-saves to pt_regs and jump to standard function. */
-STD_ENTRY(_sys_clone)
- push_extra_callee_saves r4
- j sys_clone
- STD_ENDPROC(_sys_clone)
-
- /*
- * Recover r3, r2, r1 and r0, which were saved by the unalign fast vector.
- * The vector area limit is 32 bundles, so we handle the reload here.
- * r0, r1, r2 are in thread_info from low to high memory in order.
- * r3 points to location the original r3 was saved.
- * We put this code in the __HEAD section so it can be reached
- * via a conditional branch from the fast path.
- */
- __HEAD
-hand_unalign_slow:
- andi sp, sp, ~1
-hand_unalign_slow_badsp:
- addi r3, r3, -(3 * 8)
- ld_add r0, r3, 8
- ld_add r1, r3, 8
- ld r2, r3
-hand_unalign_slow_nonuser:
- mfspr r3, SPR_SYSTEM_SAVE_K_1
- __int_hand INT_UNALIGN_DATA, UNALIGN_DATA_SLOW, int_unalign
-
-/* The unaligned data support needs to read all the registers. */
-int_unalign:
- push_extra_callee_saves r0
- j do_unaligned
-ENDPROC(hand_unalign_slow)
-
-/* Fill the return address stack with nonzero entries. */
-STD_ENTRY(fill_ra_stack)
- {
- move r0, lr
- jal 1f
- }
-1: jal 2f
-2: jal 3f
-3: jal 4f
-4: jrp r0
- STD_ENDPROC(fill_ra_stack)
-
- .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
- .org (\vecnum << 8)
- __int_hand \vecnum, \vecname, \c_routine, \processing
- .endm
-
-/* Include .intrpt array of interrupt vectors */
- .section ".intrpt", "ax"
- .global intrpt_start
-intrpt_start:
-
-#ifndef CONFIG_USE_PMC
-#define handle_perf_interrupt bad_intr
-#endif
-
-#ifndef CONFIG_HARDWALL
-#define do_hardwall_trap bad_intr
-#endif
-
- int_hand INT_MEM_ERROR, MEM_ERROR, do_trap
- int_hand INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
-#if CONFIG_KERNEL_PL == 2
- int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
- int_hand INT_SINGLE_STEP_1, SINGLE_STEP_1, bad_intr
-#else
- int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, bad_intr
- int_hand INT_SINGLE_STEP_1, SINGLE_STEP_1, gx_singlestep_handle
-#endif
- int_hand INT_SINGLE_STEP_0, SINGLE_STEP_0, bad_intr
- int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
- int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
- int_hand INT_ITLB_MISS, ITLB_MISS, do_page_fault
- int_hand INT_ILL, ILL, do_trap
- int_hand INT_GPV, GPV, do_trap
- int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap
- int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap
- int_hand INT_SWINT_3, SWINT_3, do_trap
- int_hand INT_SWINT_2, SWINT_2, do_trap
- int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
- int_hand INT_SWINT_0, SWINT_0, do_trap
- int_hand INT_ILL_TRANS, ILL_TRANS, do_trap
- int_hand_unalign_fast INT_UNALIGN_DATA, UNALIGN_DATA
- int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
- int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
- int_hand INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
- int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
- int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
- int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
- int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr
- int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr
- int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr
- int_hand INT_IPI_3, IPI_3, bad_intr
-#if CONFIG_KERNEL_PL == 2
- int_hand INT_IPI_2, IPI_2, tile_dev_intr
- int_hand INT_IPI_1, IPI_1, bad_intr
-#else
- int_hand INT_IPI_2, IPI_2, bad_intr
- int_hand INT_IPI_1, IPI_1, tile_dev_intr
-#endif
- int_hand INT_IPI_0, IPI_0, bad_intr
- int_hand INT_PERF_COUNT, PERF_COUNT, \
- handle_perf_interrupt, handle_nmi
- int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
- handle_perf_interrupt, handle_nmi
- int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
-#if CONFIG_KERNEL_PL == 2
- dc_dispatch INT_INTCTRL_2, INTCTRL_2
- int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
-#else
- int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
- dc_dispatch INT_INTCTRL_1, INTCTRL_1
-#endif
- int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
- int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
- hv_message_intr
- int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, bad_intr
- int_hand INT_I_ASID, I_ASID, bad_intr
- int_hand INT_D_ASID, D_ASID, bad_intr
- int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
-
- /* Synthetic interrupt delivered only by the simulator */
- int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint
- /* Synthetic interrupt delivered by hv */
- int_hand INT_NMI_DWNCL, NMI_DWNCL, do_nmi, handle_nmi
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
deleted file mode 100644
index 22044fc691ef..000000000000
--- a/arch/tile/kernel/irq.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/uaccess.h>
-#include <hv/drv_pcie_rc_intf.h>
-#include <arch/spr_def.h>
-#include <asm/traps.h>
-#include <linux/perf_event.h>
-
-/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
-#define IS_HW_CLEARED 1
-
-/*
- * The set of interrupts we enable for arch_local_irq_enable().
- * This is initialized to have just a single interrupt that the kernel
- * doesn't actually use as a sentinel. During kernel init,
- * interrupts are added as the kernel gets prepared to support them.
- * NOTE: we could probably initialize them all statically up front.
- */
-DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
- INITIAL_INTERRUPTS_ENABLED;
-EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);
-
-/* Define per-tile device interrupt statistics state. */
-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
-EXPORT_PER_CPU_SYMBOL(irq_stat);
-
-/*
- * Define per-tile irq disable mask; the hardware/HV only has a single
- * mask that we use to implement both masking and disabling.
- */
-static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
- ____cacheline_internodealigned_in_smp;
-
-/*
- * Per-tile IRQ nesting depth. Used to make sure we enable newly
- * enabled IRQs before exiting the outermost interrupt.
- */
-static DEFINE_PER_CPU(int, irq_depth);
-
-#if CHIP_HAS_IPI()
-/* Use SPRs to manipulate device interrupts. */
-#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
-#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
-#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
-#else
-/* Use HV to manipulate device interrupts. */
-#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
-#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
-#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
-#endif
-
-/*
- * The interrupt handling path, implemented in terms of HV interrupt
- * emulation on TILEPro and IPI hardware on TILE-Gx.
- * Entered with interrupts disabled.
- */
-void tile_dev_intr(struct pt_regs *regs, int intnum)
-{
- int depth = __this_cpu_inc_return(irq_depth);
- unsigned long original_irqs;
- unsigned long remaining_irqs;
- struct pt_regs *old_regs;
-
-#if CHIP_HAS_IPI()
- /*
- * Pending interrupts are listed in an SPR. We might be
- * nested, so be sure to only handle irqs that weren't already
- * masked by a previous interrupt. Then, mask out the ones
- * we're going to handle.
- */
- unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
- original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
- __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
-#else
- /*
- * Hypervisor performs the equivalent of the Gx code above and
- * then puts the pending interrupt mask into a system save reg
- * for us to find.
- */
- original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
-#endif
- remaining_irqs = original_irqs;
-
- /* Track time spent here in an interrupt context. */
- old_regs = set_irq_regs(regs);
- irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
- /* Debugging check for stack overflow: less than 1/8th stack free? */
- {
- long sp = stack_pointer - (long) current_thread_info();
- if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- pr_emerg("%s: stack overflow: %ld\n",
- __func__, sp - sizeof(struct thread_info));
- dump_stack();
- }
- }
-#endif
- while (remaining_irqs) {
- unsigned long irq = __ffs(remaining_irqs);
- remaining_irqs &= ~(1UL << irq);
-
- /* Count device irqs; Linux IPIs are counted elsewhere. */
- if (irq != IRQ_RESCHEDULE)
- __this_cpu_inc(irq_stat.irq_dev_intr_count);
-
- generic_handle_irq(irq);
- }
-
- /*
- * If we weren't nested, turn on all enabled interrupts,
- * including any that were reenabled during interrupt
- * handling.
- */
- if (depth == 1)
- unmask_irqs(~__this_cpu_read(irq_disable_mask));
-
- __this_cpu_dec(irq_depth);
-
- /*
- * Track time spent against the current process again and
- * process any softirqs if they are waiting.
- */
- irq_exit();
- set_irq_regs(old_regs);
-}
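
A worked example of the dispatch loop above, using only the calls that appear in it:

	/* With pending mask 0x12 (irqs 1 and 4 asserted), __ffs() picks
	 * the lowest set bit each iteration, so the handlers run for
	 * irq 1 and then irq 4.
	 */
	unsigned long pending = 0x12;
	while (pending) {
		unsigned long irq = __ffs(pending);	/* 1, then 4 */
		pending &= ~(1UL << irq);
		generic_handle_irq(irq);
	}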
-
-
-/*
- * Remove an irq from the disabled mask. If we're in an interrupt
- * context, defer enabling the HW interrupt until we leave.
- */
-static void tile_irq_chip_enable(struct irq_data *d)
-{
- get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
- if (__this_cpu_read(irq_depth) == 0)
- unmask_irqs(1UL << d->irq);
- put_cpu_var(irq_disable_mask);
-}
-
-/*
- * Add an irq to the disabled mask. We disable the HW interrupt
- * immediately so that there's no possibility of it firing. If we're
- * in an interrupt context, the return path is careful to avoid
- * unmasking a newly disabled interrupt.
- */
-static void tile_irq_chip_disable(struct irq_data *d)
-{
- get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
- mask_irqs(1UL << d->irq);
- put_cpu_var(irq_disable_mask);
-}
-
-/* Mask an interrupt. */
-static void tile_irq_chip_mask(struct irq_data *d)
-{
- mask_irqs(1UL << d->irq);
-}
-
-/* Unmask an interrupt. */
-static void tile_irq_chip_unmask(struct irq_data *d)
-{
- unmask_irqs(1UL << d->irq);
-}
-
-/*
- * Clear an interrupt before processing it so that any new assertions
- * will trigger another irq.
- */
-static void tile_irq_chip_ack(struct irq_data *d)
-{
- if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
- clear_irqs(1UL << d->irq);
-}
-
-/*
- * For per-cpu interrupts, we need to avoid unmasking any interrupts
- * that we disabled via disable_percpu_irq().
- */
-static void tile_irq_chip_eoi(struct irq_data *d)
-{
- if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
- unmask_irqs(1UL << d->irq);
-}
-
-static struct irq_chip tile_irq_chip = {
- .name = "tile_irq_chip",
- .irq_enable = tile_irq_chip_enable,
- .irq_disable = tile_irq_chip_disable,
- .irq_ack = tile_irq_chip_ack,
- .irq_eoi = tile_irq_chip_eoi,
- .irq_mask = tile_irq_chip_mask,
- .irq_unmask = tile_irq_chip_unmask,
-};
-
-void __init init_IRQ(void)
-{
- ipi_init();
-}
-
-void setup_irq_regs(void)
-{
- /* Enable interrupt delivery. */
- unmask_irqs(~0UL);
-#if CHIP_HAS_IPI()
- arch_local_irq_unmask(INT_IPI_K);
-#endif
-}
-
-void tile_irq_activate(unsigned int irq, int tile_irq_type)
-{
- /*
- * We use handle_level_irq() by default because the pending
- * interrupt vector (whether modeled by the HV on
- * TILEPro or implemented in hardware on TILE-Gx) has
- * level-style semantics for each bit. An interrupt fires
- * whenever a bit is high, not just at edges.
- */
- irq_flow_handler_t handle = handle_level_irq;
- if (tile_irq_type == TILE_IRQ_PERCPU)
- handle = handle_percpu_irq;
- irq_set_chip_and_handler(irq, &tile_irq_chip, handle);
-
- /*
- * Flag interrupts that are hardware-cleared so that ack()
- * won't clear them.
- */
- if (tile_irq_type == TILE_IRQ_HW_CLEAR)
- irq_set_chip_data(irq, (void *)IS_HW_CLEARED);
-}
-EXPORT_SYMBOL(tile_irq_activate);
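
A hypothetical caller of tile_irq_activate() (MY_DEV_IRQ is made up for illustration):

	/* A device whose interrupt the hardware clears automatically
	 * registers with TILE_IRQ_HW_CLEAR, so tile_irq_chip_ack()
	 * above sees IS_HW_CLEARED in the chip data and skips the
	 * explicit clear_irqs().
	 */
	tile_irq_activate(MY_DEV_IRQ, TILE_IRQ_HW_CLEAR);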
-
-
-void ack_bad_irq(unsigned int irq)
-{
- pr_err("unexpected IRQ trap at vector %02x\n", irq);
-}
-
-/*
- * /proc/interrupts printing:
- */
-int arch_show_interrupts(struct seq_file *p, int prec)
-{
-#ifdef CONFIG_PERF_EVENTS
- int i;
-
- seq_printf(p, "%*s: ", prec, "PMI");
-
- for_each_online_cpu(i)
- seq_printf(p, "%10llu ", per_cpu(perf_irqs, i));
- seq_puts(p, " perf_events\n");
-#endif
- return 0;
-}
-
-#if CHIP_HAS_IPI()
-int arch_setup_hwirq(unsigned int irq, int node)
-{
- return irq >= NR_IRQS ? -EINVAL : 0;
-}
-
-void arch_teardown_hwirq(unsigned int irq) { }
-#endif
diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
deleted file mode 100644
index 93931a46625b..000000000000
--- a/arch/tile/kernel/jump_label.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * jump label TILE-Gx support
- */
-
-#include <linux/jump_label.h>
-#include <linux/memory.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/cpu.h>
-
-#include <asm/cacheflush.h>
-#include <asm/insn.h>
-
-#ifdef HAVE_JUMP_LABEL
-
-static void __jump_label_transform(struct jump_entry *e,
- enum jump_label_type type)
-{
- tilegx_bundle_bits opcode;
- /* Operate on writable kernel text mapping. */
- unsigned long pc_wr = ktext_writable_addr(e->code);
-
- if (type == JUMP_LABEL_JMP)
- opcode = tilegx_gen_branch(e->code, e->target, false);
- else
- opcode = NOP();
-
- *(tilegx_bundle_bits *)pc_wr = opcode;
- /* Make sure that above mem writes were issued towards the memory. */
- smp_wmb();
-}
-
-void arch_jump_label_transform(struct jump_entry *e,
- enum jump_label_type type)
-{
- mutex_lock(&text_mutex);
-
- __jump_label_transform(e, type);
- flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
-
- mutex_unlock(&text_mutex);
-}
-
-__init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
- enum jump_label_type type)
-{
- __jump_label_transform(e, type);
-}
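
For context, a hypothetical static-key user whose branch site this code patches (do_slow_path() is made up):

	#include <linux/jump_label.h>

	extern void do_slow_path(void);		/* hypothetical */

	static DEFINE_STATIC_KEY_FALSE(my_feature);

	/* Each static_branch_unlikely() site compiles to a NOP bundle
	 * that arch_jump_label_transform() above rewrites into a branch
	 * when the key is enabled, e.g. via static_branch_enable().
	 */
	void my_hot_path(void)
	{
		if (static_branch_unlikely(&my_feature))
			do_slow_path();		/* rarely enabled */
	}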
-
-#endif /* HAVE_JUMP_LABEL */
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c
deleted file mode 100644
index d4eb5fb2df9d..000000000000
--- a/arch/tile/kernel/kgdb.c
+++ /dev/null
@@ -1,497 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE-Gx KGDB support.
- */
-
-#include <linux/ptrace.h>
-#include <linux/kgdb.h>
-#include <linux/kdebug.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/sched/task_stack.h>
-
-#include <asm/cacheflush.h>
-
-static tile_bundle_bits singlestep_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
-static unsigned long stepped_addr;
-static tile_bundle_bits stepped_instr;
-
-struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
- { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0])},
- { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1])},
- { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2])},
- { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3])},
- { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4])},
- { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5])},
- { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6])},
- { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7])},
- { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8])},
- { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9])},
- { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10])},
- { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11])},
- { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12])},
- { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13])},
- { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14])},
- { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15])},
- { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16])},
- { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17])},
- { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18])},
- { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19])},
- { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20])},
- { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21])},
- { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22])},
- { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23])},
- { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24])},
- { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25])},
- { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26])},
- { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27])},
- { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28])},
- { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29])},
- { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30])},
- { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31])},
- { "r32", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[32])},
- { "r33", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[33])},
- { "r34", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[34])},
- { "r35", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[35])},
- { "r36", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[36])},
- { "r37", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[37])},
- { "r38", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[38])},
- { "r39", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[39])},
- { "r40", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[40])},
- { "r41", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[41])},
- { "r42", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[42])},
- { "r43", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[43])},
- { "r44", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[44])},
- { "r45", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[45])},
- { "r46", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[46])},
- { "r47", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[47])},
- { "r48", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[48])},
- { "r49", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[49])},
- { "r50", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[50])},
- { "r51", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[51])},
- { "r52", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[52])},
- { "tp", GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
- { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
- { "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, lr)},
- { "sn", GDB_SIZEOF_REG, -1},
- { "idn0", GDB_SIZEOF_REG, -1},
- { "idn1", GDB_SIZEOF_REG, -1},
- { "udn0", GDB_SIZEOF_REG, -1},
- { "udn1", GDB_SIZEOF_REG, -1},
- { "udn2", GDB_SIZEOF_REG, -1},
- { "udn3", GDB_SIZEOF_REG, -1},
- { "zero", GDB_SIZEOF_REG, -1},
- { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc)},
- { "faultnum", GDB_SIZEOF_REG, offsetof(struct pt_regs, faultnum)},
-};
-
-char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
-{
- if (regno >= DBG_MAX_REG_NUM || regno < 0)
- return NULL;
-
- if (dbg_reg_def[regno].offset != -1)
- memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
- dbg_reg_def[regno].size);
- else
- memset(mem, 0, dbg_reg_def[regno].size);
- return dbg_reg_def[regno].name;
-}
-
-int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
-{
- if (regno >= DBG_MAX_REG_NUM || regno < 0)
- return -EINVAL;
-
- if (dbg_reg_def[regno].offset != -1)
- memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
- dbg_reg_def[regno].size);
- return 0;
-}
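
A usage sketch for the accessors above (TILEGX_PC_REGNUM indexes the "pc" entry in dbg_reg_def):

	/* Read the saved pc out of pt_regs through the register table;
	 * entries with offset -1 (e.g. "zero") read back as 0.
	 */
	unsigned long pc_val;
	char *name = dbg_get_reg(TILEGX_PC_REGNUM, &pc_val, regs);
	/* name is "pc"; pc_val now holds regs->pc */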
-
-/*
- * Similar to pt_regs_to_gdb_regs() except that the process is sleeping, so
- * we may not be able to get all the info.
- */
-void
-sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
-{
- struct pt_regs *thread_regs;
- const int NGPRS = TREG_LAST_GPR + 1;
-
- if (task == NULL)
- return;
-
- thread_regs = task_pt_regs(task);
- memcpy(gdb_regs, thread_regs, NGPRS * sizeof(unsigned long));
- memset(&gdb_regs[NGPRS], 0,
- (TILEGX_PC_REGNUM - NGPRS) * sizeof(unsigned long));
- gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
- gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
-}
-
-void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
-{
- regs->pc = pc;
-}
-
-static void kgdb_call_nmi_hook(void *ignored)
-{
- kgdb_nmicallback(raw_smp_processor_id(), NULL);
-}
-
-void kgdb_roundup_cpus(unsigned long flags)
-{
- local_irq_enable();
- smp_call_function(kgdb_call_nmi_hook, NULL, 0);
- local_irq_disable();
-}
-
-/*
- * Convert a kernel address to the writable kernel text mapping.
- */
-static unsigned long writable_address(unsigned long addr)
-{
- unsigned long ret = 0;
-
- if (core_kernel_text(addr))
- ret = ktext_writable_addr(addr);
- else if (is_module_text_address(addr))
- ret = addr;
- else
- pr_err("Unknown virtual address 0x%lx\n", addr);
-
- return ret;
-}
-
-/*
- * Calculate the address of the next instruction to execute after a step.
- */
-static unsigned long get_step_address(struct pt_regs *regs)
-{
- int src_reg;
- int jump_off;
- int br_off;
- unsigned long addr;
- unsigned int opcode;
- tile_bundle_bits bundle;
-
- /* Move to the next instruction by default. */
- addr = regs->pc + TILEGX_BUNDLE_SIZE_IN_BYTES;
- bundle = *(unsigned long *)instruction_pointer(regs);
-
- /* 0: X mode; otherwise: Y mode. */
- if (bundle & TILEGX_BUNDLE_MODE_MASK) {
- if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
- get_RRROpcodeExtension_Y1(bundle) ==
- UNARY_RRR_1_OPCODE_Y1) {
- opcode = get_UnaryOpcodeExtension_Y1(bundle);
-
- switch (opcode) {
- case JALR_UNARY_OPCODE_Y1:
- case JALRP_UNARY_OPCODE_Y1:
- case JR_UNARY_OPCODE_Y1:
- case JRP_UNARY_OPCODE_Y1:
- src_reg = get_SrcA_Y1(bundle);
- dbg_get_reg(src_reg, &addr, regs);
- break;
- }
- }
- } else if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
- if (get_RRROpcodeExtension_X1(bundle) ==
- UNARY_RRR_0_OPCODE_X1) {
- opcode = get_UnaryOpcodeExtension_X1(bundle);
-
- switch (opcode) {
- case JALR_UNARY_OPCODE_X1:
- case JALRP_UNARY_OPCODE_X1:
- case JR_UNARY_OPCODE_X1:
- case JRP_UNARY_OPCODE_X1:
- src_reg = get_SrcA_X1(bundle);
- dbg_get_reg(src_reg, &addr, regs);
- break;
- }
- }
- } else if (get_Opcode_X1(bundle) == JUMP_OPCODE_X1) {
- opcode = get_JumpOpcodeExtension_X1(bundle);
-
- switch (opcode) {
- case JAL_JUMP_OPCODE_X1:
- case J_JUMP_OPCODE_X1:
- jump_off = sign_extend(get_JumpOff_X1(bundle), 27);
- addr = regs->pc +
- (jump_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
- break;
- }
- } else if (get_Opcode_X1(bundle) == BRANCH_OPCODE_X1) {
- br_off = 0;
- opcode = get_BrType_X1(bundle);
-
- switch (opcode) {
- case BEQZT_BRANCH_OPCODE_X1:
- case BEQZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) == 0)
- br_off = get_BrOff_X1(bundle);
- break;
- case BGEZT_BRANCH_OPCODE_X1:
- case BGEZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) >= 0)
- br_off = get_BrOff_X1(bundle);
- break;
- case BGTZT_BRANCH_OPCODE_X1:
- case BGTZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) > 0)
- br_off = get_BrOff_X1(bundle);
- break;
- case BLBCT_BRANCH_OPCODE_X1:
- case BLBC_BRANCH_OPCODE_X1:
- if (!(get_SrcA_X1(bundle) & 1))
- br_off = get_BrOff_X1(bundle);
- break;
- case BLBST_BRANCH_OPCODE_X1:
- case BLBS_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) & 1)
- br_off = get_BrOff_X1(bundle);
- break;
- case BLEZT_BRANCH_OPCODE_X1:
- case BLEZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) <= 0)
- br_off = get_BrOff_X1(bundle);
- break;
- case BLTZT_BRANCH_OPCODE_X1:
- case BLTZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) < 0)
- br_off = get_BrOff_X1(bundle);
- break;
- case BNEZT_BRANCH_OPCODE_X1:
- case BNEZ_BRANCH_OPCODE_X1:
- if (get_SrcA_X1(bundle) != 0)
- br_off = get_BrOff_X1(bundle);
- break;
- }
-
- if (br_off != 0) {
- br_off = sign_extend(br_off, 17);
- addr = regs->pc +
- (br_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
- }
- }
-
- return addr;
-}
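
A worked example of the branch-target arithmetic above, assuming sign_extend(val, bits) sign-extends a bits-wide field:

	/* A 17-bit branch offset of 0x1FFFF means -1 bundle, so the
	 * target is pc + (-1 << 3) = pc - 8 bytes (one 8-byte bundle
	 * back).
	 */
	int br_off = sign_extend(0x1FFFF, 17);		/* -1 */
	unsigned long target = regs->pc +
		(br_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);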
-
-/*
- * Replace the next instruction after the current instruction with a
- * breakpoint instruction.
- */
-static void do_single_step(struct pt_regs *regs)
-{
- unsigned long addr_wr;
-
- /* Determine where the target instruction will send us to. */
- stepped_addr = get_step_address(regs);
- probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr,
- BREAK_INSTR_SIZE);
-
- addr_wr = writable_address(stepped_addr);
- probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn,
- BREAK_INSTR_SIZE);
- smp_wmb();
- flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
-}
-
-static void undo_single_step(struct pt_regs *regs)
-{
- unsigned long addr_wr;
-
- if (stepped_instr == 0)
- return;
-
- addr_wr = writable_address(stepped_addr);
- probe_kernel_write((char *)addr_wr, (char *)&stepped_instr,
- BREAK_INSTR_SIZE);
- stepped_instr = 0;
- smp_wmb();
- flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
-}
-
-/*
- * Called from the die notifier chain before the kernel dies. If
- * KGDB is enabled, try to fall into the debugger.
- */
-static int
-kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
-{
- int ret;
- unsigned long flags;
- struct die_args *args = (struct die_args *)ptr;
- struct pt_regs *regs = args->regs;
-
-#ifdef CONFIG_KPROBES
- /*
- * Return immediately if the kprobes fault notifier has set
- * DIE_PAGE_FAULT.
- */
- if (cmd == DIE_PAGE_FAULT)
- return NOTIFY_DONE;
-#endif /* CONFIG_KPROBES */
-
- switch (cmd) {
- case DIE_BREAK:
- case DIE_COMPILED_BPT:
- break;
- case DIE_SSTEPBP:
- local_irq_save(flags);
- kgdb_handle_exception(0, SIGTRAP, 0, regs);
- local_irq_restore(flags);
- return NOTIFY_STOP;
- default:
- /* Userspace events, ignore. */
- if (user_mode(regs))
- return NOTIFY_DONE;
- }
-
- local_irq_save(flags);
- ret = kgdb_handle_exception(args->trapnr, args->signr, args->err, regs);
- local_irq_restore(flags);
- if (ret)
- return NOTIFY_DONE;
-
- return NOTIFY_STOP;
-}
-
-static struct notifier_block kgdb_notifier = {
- .notifier_call = kgdb_notify,
-};
-
-/*
- * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
- * @vector: The error vector of the exception that happened.
- * @signo: The signal number of the exception that happened.
- * @err_code: The error code of the exception that happened.
- * @remcom_in_buffer: The buffer of the packet we have read.
- * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
- * @regs: The &struct pt_regs of the current process.
- *
- * This function MUST handle the 'c' and 's' command packets,
- * as well as packets to set / remove a hardware breakpoint, if used.
- * If there are additional packets which the hardware needs to handle,
- * they are handled here. The code should return -1 if it wants to
- * process more packets, and a %0 or %1 if it wants to exit from the
- * kgdb callback.
- */
-int kgdb_arch_handle_exception(int vector, int signo, int err_code,
- char *remcom_in_buffer, char *remcom_out_buffer,
- struct pt_regs *regs)
-{
- char *ptr;
- unsigned long address;
-
- /* Undo any stepping we may have done. */
- undo_single_step(regs);
-
- switch (remcom_in_buffer[0]) {
- case 'c':
- case 's':
- case 'D':
- case 'k':
- /*
- * Try to read an optional parameter; pc is unchanged if none is given.
- * If this was a compiled-in breakpoint, we need to move
- * to the next instruction or we will just breakpoint
- * over and over again.
- */
- ptr = &remcom_in_buffer[1];
- if (kgdb_hex2long(&ptr, &address))
- regs->pc = address;
- else if (*(unsigned long *)regs->pc == compiled_bpt)
- regs->pc += BREAK_INSTR_SIZE;
-
- if (remcom_in_buffer[0] == 's') {
- do_single_step(regs);
- kgdb_single_step = 1;
- atomic_set(&kgdb_cpu_doing_single_step,
- raw_smp_processor_id());
- } else
- atomic_set(&kgdb_cpu_doing_single_step, -1);
-
- return 0;
- }
-
- return -1; /* this means that we do not want to exit from the handler */
-}
-
-struct kgdb_arch arch_kgdb_ops;
-
-/*
- * kgdb_arch_init - Perform any architecture specific initialization.
- *
- * This function will handle the initialization of any architecture
- * specific callbacks.
- */
-int kgdb_arch_init(void)
-{
- tile_bundle_bits bundle = TILEGX_BPT_BUNDLE;
-
- memcpy(arch_kgdb_ops.gdb_bpt_instr, &bundle, BREAK_INSTR_SIZE);
- return register_die_notifier(&kgdb_notifier);
-}
-
-/*
- * kgdb_arch_exit - Perform any architecture specific uninitialization.
- *
- * This function will handle the uninitialization of any architecture
- * specific callbacks, for dynamic registration and unregistration.
- */
-void kgdb_arch_exit(void)
-{
- unregister_die_notifier(&kgdb_notifier);
-}
-
-int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
-{
- int err;
- unsigned long addr_wr = writable_address(bpt->bpt_addr);
-
- if (addr_wr == 0)
- return -1;
-
- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
- BREAK_INSTR_SIZE);
- if (err)
- return err;
-
- err = probe_kernel_write((char *)addr_wr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE);
- smp_wmb();
- flush_icache_range((unsigned long)bpt->bpt_addr,
- (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
- return err;
-}
-
-int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
-{
- int err;
- unsigned long addr_wr = writable_address(bpt->bpt_addr);
-
- if (addr_wr == 0)
- return -1;
-
- err = probe_kernel_write((char *)addr_wr, (char *)bpt->saved_instr,
- BREAK_INSTR_SIZE);
- smp_wmb();
- flush_icache_range((unsigned long)bpt->bpt_addr,
- (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
- return err;
-}
diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
deleted file mode 100644
index c68694bb1ad2..000000000000
--- a/arch/tile/kernel/kprobes.c
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * arch/tile/kernel/kprobes.c
- * Kprobes on TILE-Gx
- *
- * Some portions copied from the MIPS version.
- *
- * Copyright (C) IBM Corporation, 2002, 2004
- * Copyright 2006 Sony Corp.
- * Copyright 2010 Cavium Networks
- *
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/kprobes.h>
-#include <linux/kdebug.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <asm/cacheflush.h>
-
-#include <arch/opcode.h>
-
-DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
-DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-
-tile_bundle_bits breakpoint_insn = TILEGX_BPT_BUNDLE;
-tile_bundle_bits breakpoint2_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
-
-/*
- * Check whether the instruction is a branch or jump, or if executing it
- * has different results depending on where it is executed (e.g. lnk).
- */
-static int __kprobes insn_has_control(kprobe_opcode_t insn)
-{
- if (get_Mode(insn) != 0) { /* Y-format bundle */
- if (get_Opcode_Y1(insn) != RRR_1_OPCODE_Y1 ||
- get_RRROpcodeExtension_Y1(insn) != UNARY_RRR_1_OPCODE_Y1)
- return 0;
-
- switch (get_UnaryOpcodeExtension_Y1(insn)) {
- case JALRP_UNARY_OPCODE_Y1:
- case JALR_UNARY_OPCODE_Y1:
- case JRP_UNARY_OPCODE_Y1:
- case JR_UNARY_OPCODE_Y1:
- case LNK_UNARY_OPCODE_Y1:
- return 1;
- default:
- return 0;
- }
- }
-
- switch (get_Opcode_X1(insn)) {
- case BRANCH_OPCODE_X1: /* branch instructions */
- case JUMP_OPCODE_X1: /* jump instructions: j and jal */
- return 1;
-
- case RRR_0_OPCODE_X1: /* other jump instructions */
- if (get_RRROpcodeExtension_X1(insn) != UNARY_RRR_0_OPCODE_X1)
- return 0;
- switch (get_UnaryOpcodeExtension_X1(insn)) {
- case JALRP_UNARY_OPCODE_X1:
- case JALR_UNARY_OPCODE_X1:
- case JRP_UNARY_OPCODE_X1:
- case JR_UNARY_OPCODE_X1:
- case LNK_UNARY_OPCODE_X1:
- return 1;
- default:
- return 0;
- }
- default:
- return 0;
- }
-}
-
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
-{
- unsigned long addr = (unsigned long)p->addr;
-
- if (addr & (sizeof(kprobe_opcode_t) - 1))
- return -EINVAL;
-
- if (insn_has_control(*p->addr)) {
- pr_notice("Kprobes for control instructions are not supported\n");
- return -EINVAL;
- }
-
- /* insn: must be on special executable page on tile. */
- p->ainsn.insn = get_insn_slot();
- if (!p->ainsn.insn)
- return -ENOMEM;
-
- /*
- * In the kprobe->ainsn.insn[] array we store the original
- * instruction at index zero and a break trap instruction at
- * index one.
- */
- memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
- p->ainsn.insn[1] = breakpoint2_insn;
- p->opcode = *p->addr;
-
- return 0;
-}
-
-void __kprobes arch_arm_kprobe(struct kprobe *p)
-{
- unsigned long addr_wr;
-
- /* Operate on writable kernel text mapping. */
- addr_wr = ktext_writable_addr(p->addr);
-
- if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
- sizeof(breakpoint_insn)))
- pr_err("%s: failed to enable kprobe\n", __func__);
-
- smp_wmb();
- flush_insn_slot(p);
-}
-
-void __kprobes arch_disarm_kprobe(struct kprobe *kp)
-{
- unsigned long addr_wr;
-
- /* Operate on writable kernel text mapping. */
- addr_wr = ktext_writable_addr(kp->addr);
-
- if (probe_kernel_write((void *)addr_wr, &kp->opcode,
- sizeof(kp->opcode)))
- pr_err("%s: failed to disarm kprobe\n", __func__);
-
- smp_wmb();
- flush_insn_slot(kp);
-}
-
-void __kprobes arch_remove_kprobe(struct kprobe *p)
-{
- if (p->ainsn.insn) {
- free_insn_slot(p->ainsn.insn, 0);
- p->ainsn.insn = NULL;
- }
-}
-
-static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
- kcb->prev_kprobe.kp = kprobe_running();
- kcb->prev_kprobe.status = kcb->kprobe_status;
- kcb->prev_kprobe.saved_pc = kcb->kprobe_saved_pc;
-}
-
-static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
-{
- __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
- kcb->kprobe_status = kcb->prev_kprobe.status;
- kcb->kprobe_saved_pc = kcb->prev_kprobe.saved_pc;
-}
-
-static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- __this_cpu_write(current_kprobe, p);
- kcb->kprobe_saved_pc = regs->pc;
-}
-
-static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
-{
- /* Single step inline if the instruction is a break. */
- if (p->opcode == breakpoint_insn ||
- p->opcode == breakpoint2_insn)
- regs->pc = (unsigned long)p->addr;
- else
- regs->pc = (unsigned long)&p->ainsn.insn[0];
-}
-
-static int __kprobes kprobe_handler(struct pt_regs *regs)
-{
- struct kprobe *p;
- int ret = 0;
- kprobe_opcode_t *addr;
- struct kprobe_ctlblk *kcb;
-
- addr = (kprobe_opcode_t *)regs->pc;
-
- /*
- * We don't want to be preempted for the entire
- * duration of kprobe processing.
- */
- preempt_disable();
- kcb = get_kprobe_ctlblk();
-
- /* Check we're not actually recursing. */
- if (kprobe_running()) {
- p = get_kprobe(addr);
- if (p) {
- if (kcb->kprobe_status == KPROBE_HIT_SS &&
- p->ainsn.insn[0] == breakpoint_insn) {
- goto no_kprobe;
- }
- /*
- * We have reentered the kprobe_handler(), since
- * another probe was hit while within the handler.
- * We here save the original kprobes variables and
- * just single step on the instruction of the new probe
- * without calling any user handlers.
- */
- save_previous_kprobe(kcb);
- set_current_kprobe(p, regs, kcb);
- kprobes_inc_nmissed_count(p);
- prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_REENTER;
- return 1;
- } else {
- if (*addr != breakpoint_insn) {
- /*
- * The breakpoint instruction was removed by
- * another cpu right after we hit, no further
- * handling of this interrupt is appropriate.
- */
- ret = 1;
- goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs))
- goto ss_probe;
- }
- goto no_kprobe;
- }
-
- p = get_kprobe(addr);
- if (!p) {
- if (*addr != breakpoint_insn) {
- /*
- * The breakpoint instruction was removed right
- * after we hit it. Another cpu has removed
- * either a probepoint or a debugger breakpoint
- * at this address. In either case, no further
- * handling of this interrupt is appropriate.
- */
- ret = 1;
- }
- /* Not one of ours: let kernel handle it. */
- goto no_kprobe;
- }
-
- set_current_kprobe(p, regs, kcb);
- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-
- if (p->pre_handler && p->pre_handler(p, regs)) {
- /* Handler has already set things up, so skip ss setup. */
- return 1;
- }
-
-ss_probe:
- prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_HIT_SS;
- return 1;
-
-no_kprobe:
- preempt_enable_no_resched();
- return ret;
-}
-
-/*
- * Called after single-stepping. p->addr is the address of the
- * instruction that has been replaced by the breakpoint. To avoid the
- * SMP problems that can occur when we temporarily put back the
- * original opcode to single-step, we single-stepped a copy of the
- * instruction. The address of this copy is p->ainsn.insn.
- *
- * This function prepares to return from the post-single-step
- * breakpoint trap.
- */
-static void __kprobes resume_execution(struct kprobe *p,
- struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- unsigned long orig_pc = kcb->kprobe_saved_pc;
- /* Advance past the 8-byte TILE-Gx instruction bundle we stepped. */
- regs->pc = orig_pc + 8;
-}
-
-static inline int post_kprobe_handler(struct pt_regs *regs)
-{
- struct kprobe *cur = kprobe_running();
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (!cur)
- return 0;
-
- if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler(cur, regs, 0);
- }
-
- resume_execution(cur, regs, kcb);
-
- /* Restore back the original saved kprobes variables and continue. */
- if (kcb->kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe(kcb);
- goto out;
- }
- reset_current_kprobe();
-out:
- preempt_enable_no_resched();
-
- return 1;
-}
-
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
-{
- struct kprobe *cur = kprobe_running();
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- if (kcb->kprobe_status & KPROBE_HIT_SS) {
- /*
- * We are here because the instruction being single
- * stepped caused a page fault. We reset the current
- * kprobe and the ip points back to the probe address
- * and allow the page fault handler to continue as a
- * normal page fault.
- */
- resume_execution(cur, regs, kcb);
- reset_current_kprobe();
- preempt_enable_no_resched();
- }
- return 0;
-}
-
-/*
- * Wrapper routine for handling exceptions.
- */
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- struct die_args *args = (struct die_args *)data;
- int ret = NOTIFY_DONE;
-
- switch (val) {
- case DIE_BREAK:
- if (kprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- case DIE_SSTEPBP:
- if (post_kprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- case DIE_PAGE_FAULT:
- /* kprobe_running() needs smp_processor_id(). */
- preempt_disable();
-
- if (kprobe_running()
- && kprobe_fault_handler(args->regs, args->trapnr))
- ret = NOTIFY_STOP;
- preempt_enable();
- break;
- default:
- break;
- }
- return ret;
-}
-
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- kcb->jprobe_saved_regs = *regs;
- kcb->jprobe_saved_sp = regs->sp;
-
- memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
- MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
-
- regs->pc = (unsigned long)(jp->entry);
-
- return 1;
-}
-
-/* Defined in the inline asm below. */
-void jprobe_return_end(void);
-
-void __kprobes jprobe_return(void)
-{
- asm volatile(
- "bpt\n\t"
- ".globl jprobe_return_end\n"
- "jprobe_return_end:\n");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (regs->pc >= (unsigned long)jprobe_return &&
- regs->pc <= (unsigned long)jprobe_return_end) {
- *regs = kcb->jprobe_saved_regs;
- memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
- MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
- preempt_enable_no_resched();
-
- return 1;
- }
- return 0;
-}
-
-/*
- * Function return probe trampoline:
- * - init_kprobes() establishes a probepoint here
- * - When the probed function returns, this probe causes the
- * handlers to fire
- */
-static void __used kretprobe_trampoline_holder(void)
-{
- asm volatile(
- "nop\n\t"
- ".global kretprobe_trampoline\n"
- "kretprobe_trampoline:\n\t"
- "nop\n\t"
- : : : "memory");
-}
-
-void kretprobe_trampoline(void);
-
-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
- struct pt_regs *regs)
-{
- ri->ret_addr = (kprobe_opcode_t *) regs->lr;
-
- /* Replace the return addr with trampoline addr */
- regs->lr = (unsigned long)kretprobe_trampoline;
-}
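
With regs->lr redirected, the probed function "returns" into kretprobe_trampoline and trampoline_probe_handler below fires. A hedged sketch of registering such a return probe; the symbol name is an arbitrary example, and regs_return_value() is assumed to read r0 per the TILE calling convention:

    #include <linux/kprobes.h>

    static int ret_handler(struct kretprobe_instance *ri,
                           struct pt_regs *regs)
    {
            pr_info("probed function returned %ld\n",
                    regs_return_value(regs));
            return 0;
    }

    static struct kretprobe my_kretprobe = {
            .handler        = ret_handler,
            .kp.symbol_name = "do_fork",    /* hypothetical target */
            .maxactive      = 16,   /* concurrent instances to track */
    };

    /* register_kretprobe(&my_kretprobe) ends up in
     * arch_prepare_kretprobe() above on each function entry. */
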
-
-/*
- * Called when the probe at kretprobe trampoline is hit.
- */
-static int __kprobes trampoline_probe_handler(struct kprobe *p,
- struct pt_regs *regs)
-{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because multiple functions in the call path have
- * a return probe installed on them, and/or more than one
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address) {
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
- instruction_pointer(regs) = orig_ret_address;
-
- reset_current_kprobe();
- kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
- /*
- * By returning a non-zero value, we are telling
- * kprobe_handler() that we don't want the post_handler
- * to run (and have re-enabled preemption)
- */
- return 1;
-}
-
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
-{
- if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
- return 1;
-
- return 0;
-}
-
-static struct kprobe trampoline_p = {
- .addr = (kprobe_opcode_t *)kretprobe_trampoline,
- .pre_handler = trampoline_probe_handler
-};
-
-int __init arch_init_kprobes(void)
-{
- register_kprobe(&trampoline_p);
- return 0;
-}
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
deleted file mode 100644
index 008aa2faef55..000000000000
--- a/arch/tile/kernel/machine_kexec.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * based on machine_kexec.c from other architectures in linux-2.6.18
- */
-
-#include <linux/mm.h>
-#include <linux/kexec.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/errno.h>
-#include <linux/vmalloc.h>
-#include <linux/cpumask.h>
-#include <linux/kernel.h>
-#include <linux/elf.h>
-#include <linux/highmem.h>
-#include <linux/mmu_context.h>
-#include <linux/io.h>
-#include <linux/timex.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/cacheflush.h>
-#include <asm/checksum.h>
-#include <asm/tlbflush.h>
-#include <asm/homecache.h>
-#include <hv/hypervisor.h>
-
-
-/*
- * These definitions are not in elf.h or any other kernel include.
- * They are needed below in the little boot-notes parser to
- * extract the command line so we can pass it to the hypervisor.
- */
-struct Elf32_Bhdr {
- Elf32_Word b_signature;
- Elf32_Word b_size;
- Elf32_Half b_checksum;
- Elf32_Half b_records;
-};
-#define ELF_BOOT_MAGIC 0x0E1FB007
-#define EBN_COMMAND_LINE 0x00000004
-#define roundupsz(X) (((X) + 3) & ~3)
-
-/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
-
-
-void machine_shutdown(void)
-{
- /*
- * Normally we would stop all the other processors here, but
- * the check in machine_kexec_prepare below ensures we'll only
- * get this far if we've been booted with "nosmp" on the
- * command line or without CONFIG_SMP, so there's nothing to do
- * here (for now).
- */
-}
-
-void machine_crash_shutdown(struct pt_regs *regs)
-{
- /*
- * Cannot happen. This type of kexec is disabled on this
- * architecture (and enforced in machine_kexec_prepare below).
- */
-}
-
-
-int machine_kexec_prepare(struct kimage *image)
-{
- if (num_online_cpus() > 1) {
- pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n",
- __func__);
- return -ENOSYS;
- }
- if (image->type != KEXEC_TYPE_DEFAULT) {
- pr_warn("%s: detected attempt to kexec with unsupported type: %d\n",
- __func__, image->type);
- return -ENOSYS;
- }
- return 0;
-}
-
-void machine_kexec_cleanup(struct kimage *image)
-{
- /*
- * We did nothing in machine_kexec_prepare,
- * so we have nothing to do here.
- */
-}
-
-/*
- * If we can find elf boot notes on this page, return the command
- * line. Otherwise, silently return null. Somewhat kludgy, but no
- * good way to do this without significantly rearchitecting the
- * architecture-independent kexec code.
- */
-
-static unsigned char *kexec_bn2cl(void *pg)
-{
- struct Elf32_Bhdr *bhdrp;
- Elf32_Nhdr *nhdrp;
- unsigned char *desc;
- unsigned char *command_line;
- __sum16 csum;
-
- bhdrp = (struct Elf32_Bhdr *) pg;
-
- /*
- * This routine is invoked for every source page, so make
- * sure to quietly ignore every impossible page.
- */
- if (bhdrp->b_signature != ELF_BOOT_MAGIC ||
- bhdrp->b_size > PAGE_SIZE)
- return 0;
-
- /*
- * If we get a checksum mismatch, warn with the checksum
- * so we can diagnose better.
- */
- csum = ip_compute_csum(pg, bhdrp->b_size);
- if (csum != 0) {
- pr_warn("%s: bad checksum %#x (size %d)\n",
- __func__, csum, bhdrp->b_size);
- return 0;
- }
-
- nhdrp = (Elf32_Nhdr *) (bhdrp + 1);
-
- while (nhdrp->n_type != EBN_COMMAND_LINE) {
-
- desc = (unsigned char *) (nhdrp + 1);
- desc += roundupsz(nhdrp->n_descsz);
-
- nhdrp = (Elf32_Nhdr *) desc;
-
- /* still in bounds? */
- if ((unsigned char *) (nhdrp + 1) >
- ((unsigned char *) pg) + bhdrp->b_size) {
-
- pr_info("%s: out of bounds\n", __func__);
- return 0;
- }
- }
-
- command_line = (unsigned char *) (nhdrp + 1);
- desc = command_line;
-
- while (*desc != '\0') {
- desc++;
- if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
- pr_info("%s: ran off end of page\n", __func__);
- return 0;
- }
- }
-
- return command_line;
-}
-
-static void kexec_find_and_set_command_line(struct kimage *image)
-{
- kimage_entry_t *ptr, entry;
-
- unsigned char *command_line = 0;
- unsigned char *r;
- HV_Errno hverr;
-
- for (ptr = &image->head;
- (entry = *ptr) && !(entry & IND_DONE);
- ptr = (entry & IND_INDIRECTION) ?
- phys_to_virt((entry & PAGE_MASK)) : ptr + 1) {
-
- if ((entry & IND_SOURCE)) {
- void *va =
- kmap_atomic_pfn(entry >> PAGE_SHIFT);
- r = kexec_bn2cl(va);
- if (r) {
- command_line = r;
- break;
- }
- kunmap_atomic(va);
- }
- }
-
- if (command_line != 0) {
- pr_info("setting new command line to \"%s\"\n", command_line);
-
- hverr = hv_set_command_line(
- (HV_VirtAddr) command_line, strlen(command_line));
- kunmap_atomic(command_line);
- } else {
- pr_info("%s: no command line found; making empty\n", __func__);
- hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
- }
- if (hverr)
- pr_warn("%s: hv_set_command_line returned error: %d\n",
- __func__, hverr);
-}
-
-/*
- * The kexec code range-checks all its PAs, so to avoid having it run
- * amok and allocate memory and then sequester it from every other
- * controller, we force it to come from controller zero. We also
- * disable the oom-killer since if we do end up running out of memory,
- * that almost certainly won't help.
- */
-struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
-{
- gfp_mask |= __GFP_THISNODE | __GFP_NORETRY;
- return alloc_pages_node(0, gfp_mask, order);
-}
-
-/*
- * Address range in which the pa=va mapping is set in setup_quasi_va_is_pa().
- * For tilepro, PAGE_OFFSET is used since this is the largest possible value
- * for tilepro, while for tilegx we limit it to the entire middle-level page
- * table, which we assume has been allocated and is undoubtedly large enough.
- */
-#ifndef __tilegx__
-#define QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
-#else
-#define QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
-#endif
-
-static void setup_quasi_va_is_pa(void)
-{
- HV_PTE pte;
- unsigned long i;
-
- /*
- * Flush our TLB to prevent conflicts between the previous contents
- * and the new stuff we're about to add.
- */
- local_flush_tlb_all();
-
- /*
- * Set up a VA == PA mapping, at least up to QUASI_VA_IS_PA_ADDR_RANGE.
- * Note that here we assume the level-1 page table is defined by
- * HPAGE_SIZE.
- */
- pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
- pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
- for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
- unsigned long vaddr = i << HPAGE_SHIFT;
- pgd_t *pgd = pgd_offset(current->mm, vaddr);
- pud_t *pud = pud_offset(pgd, vaddr);
- pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
- unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
-
- if (pfn_valid(pfn))
- __set_pte(ptep, pfn_pte(pfn, pte));
- }
-}
-
-
-void machine_kexec(struct kimage *image)
-{
- void *reboot_code_buffer;
- pte_t *ptep;
- void (*rnk)(unsigned long, void *, unsigned long)
- __noreturn;
-
- /* Mask all interrupts before starting to reboot. */
- interrupt_mask_set_mask(~0ULL);
-
- kexec_find_and_set_command_line(image);
-
- /*
- * Adjust the home caching of the control page to be cached on
- * this cpu, and copy the assembly helper into the control
- * code page, which we map in the vmalloc area.
- */
- homecache_change_page_home(image->control_code_page, 0,
- smp_processor_id());
- reboot_code_buffer = page_address(image->control_code_page);
- BUG_ON(reboot_code_buffer == NULL);
- ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
- __set_pte(ptep, pte_mkexec(*ptep));
- memcpy(reboot_code_buffer, relocate_new_kernel,
- relocate_new_kernel_size);
- __flush_icache_range(
- (unsigned long) reboot_code_buffer,
- (unsigned long) reboot_code_buffer + relocate_new_kernel_size);
-
- setup_quasi_va_is_pa();
-
- /* now call it */
- rnk = reboot_code_buffer;
- (*rnk)(image->head, reboot_code_buffer, image->start);
-}
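
For context, this path runs when userspace loads and executes a new image via the kexec_load syscall; a hedged userspace sketch (the segment contents, including the ELF boot notes the parser above looks for, are assumed to be prepared by the caller):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/kexec.h>

    /* Hand preloaded segments to the kernel; the running kernel must
     * have booted with "nosmp", per machine_kexec_prepare() above. */
    static int load_kexec_image(unsigned long entry,
                                struct kexec_segment *segs,
                                unsigned long nsegs)
    {
            return syscall(SYS_kexec_load, entry, nsegs, segs,
                           KEXEC_ARCH_DEFAULT);
    }
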
diff --git a/arch/tile/kernel/mcount_64.S b/arch/tile/kernel/mcount_64.S
deleted file mode 100644
index 6c6702451962..000000000000
--- a/arch/tile/kernel/mcount_64.S
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE-Gx specific __mcount support
- */
-
-#include <linux/linkage.h>
-#include <asm/ftrace.h>
-
-#define REGSIZE 8
-
- .text
- .global __mcount
-
- .macro MCOUNT_SAVE_REGS
- addli sp, sp, -REGSIZE
- {
- st sp, lr
- addli r29, sp, - (12 * REGSIZE)
- }
- {
- addli sp, sp, - (13 * REGSIZE)
- st r29, sp
- }
- addli r29, r29, REGSIZE
- { st r29, r0; addli r29, r29, REGSIZE }
- { st r29, r1; addli r29, r29, REGSIZE }
- { st r29, r2; addli r29, r29, REGSIZE }
- { st r29, r3; addli r29, r29, REGSIZE }
- { st r29, r4; addli r29, r29, REGSIZE }
- { st r29, r5; addli r29, r29, REGSIZE }
- { st r29, r6; addli r29, r29, REGSIZE }
- { st r29, r7; addli r29, r29, REGSIZE }
- { st r29, r8; addli r29, r29, REGSIZE }
- { st r29, r9; addli r29, r29, REGSIZE }
- { st r29, r10; addli r29, r29, REGSIZE }
- .endm
-
- .macro MCOUNT_RESTORE_REGS
- addli r29, sp, (2 * REGSIZE)
- { ld r0, r29; addli r29, r29, REGSIZE }
- { ld r1, r29; addli r29, r29, REGSIZE }
- { ld r2, r29; addli r29, r29, REGSIZE }
- { ld r3, r29; addli r29, r29, REGSIZE }
- { ld r4, r29; addli r29, r29, REGSIZE }
- { ld r5, r29; addli r29, r29, REGSIZE }
- { ld r6, r29; addli r29, r29, REGSIZE }
- { ld r7, r29; addli r29, r29, REGSIZE }
- { ld r8, r29; addli r29, r29, REGSIZE }
- { ld r9, r29; addli r29, r29, REGSIZE }
- { ld r10, r29; addli lr, sp, (13 * REGSIZE) }
- { ld lr, lr; addli sp, sp, (14 * REGSIZE) }
- .endm
-
- .macro RETURN_BACK
- { move r12, lr; move lr, r10 }
- jrp r12
- .endm
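
Together with the "self"/"parent" comments below, the register moves above imply the profiling ABI assumed here: on entry to __mcount, lr holds the return point inside the instrumented function and r10 carries the caller's return address, which RETURN_BACK moves back into lr. A rough, hedged C-level picture of what -pg instrumentation amounts to (conceptual only, not literal compiler output):

    /* Conceptual sketch: with -pg the compiler inserts a call like
     * this in every prologue; r10 is assumed to carry the caller's
     * return address, per the register usage in this file. */
    void instrumented_function(void)
    {
            __mcount();     /* lr = "self", r10 = "parent" */
            /* ... original function body ... */
    }
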
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
- .align 64
-STD_ENTRY(__mcount)
-__mcount:
- j ftrace_stub
-STD_ENDPROC(__mcount)
-
- .align 64
-STD_ENTRY(ftrace_caller)
- MCOUNT_SAVE_REGS
-
- /* arg1: self return address */
- /* arg2: parent's return address */
- /* arg3: ftrace_ops */
- /* arg4: regs (but make it NULL) */
- { move r0, lr; moveli r2, hw2_last(function_trace_op) }
- { move r1, r10; shl16insli r2, r2, hw1(function_trace_op) }
- { movei r3, 0; shl16insli r2, r2, hw0(function_trace_op) }
- ld r2, r2
-
- .global ftrace_call
-ftrace_call:
- /*
- * a placeholder for the call to a real tracing function, i.e.
- * ftrace_trace_function()
- */
- nop
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .global ftrace_graph_call
-ftrace_graph_call:
- /*
- * a placeholder for the call to a real tracing function, i.e.
- * ftrace_graph_caller()
- */
- nop
-#endif
- MCOUNT_RESTORE_REGS
- .global ftrace_stub
-ftrace_stub:
- RETURN_BACK
-STD_ENDPROC(ftrace_caller)
-
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
- .align 64
-STD_ENTRY(__mcount)
- {
- moveli r11, hw2_last(ftrace_trace_function)
- moveli r13, hw2_last(ftrace_stub)
- }
- {
- shl16insli r11, r11, hw1(ftrace_trace_function)
- shl16insli r13, r13, hw1(ftrace_stub)
- }
- {
- shl16insli r11, r11, hw0(ftrace_trace_function)
- shl16insli r13, r13, hw0(ftrace_stub)
- }
-
- ld r11, r11
- sub r14, r13, r11
- bnez r14, static_trace
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- moveli r15, hw2_last(ftrace_graph_return)
- shl16insli r15, r15, hw1(ftrace_graph_return)
- shl16insli r15, r15, hw0(ftrace_graph_return)
- ld r15, r15
- sub r15, r15, r13
- bnez r15, ftrace_graph_caller
-
- {
- moveli r16, hw2_last(ftrace_graph_entry)
- moveli r17, hw2_last(ftrace_graph_entry_stub)
- }
- {
- shl16insli r16, r16, hw1(ftrace_graph_entry)
- shl16insli r17, r17, hw1(ftrace_graph_entry_stub)
- }
- {
- shl16insli r16, r16, hw0(ftrace_graph_entry)
- shl16insli r17, r17, hw0(ftrace_graph_entry_stub)
- }
- ld r16, r16
- sub r17, r16, r17
- bnez r17, ftrace_graph_caller
-
-#endif
- RETURN_BACK
-
-static_trace:
- MCOUNT_SAVE_REGS
-
- /* arg1: self return address */
- /* arg2: parent's return address */
- { move r0, lr; move r1, r10 }
-
- /* call ftrace_trace_function() */
- jalr r11
-
- MCOUNT_RESTORE_REGS
-
- .global ftrace_stub
-ftrace_stub:
- RETURN_BACK
-STD_ENDPROC(__mcount)
-
-#endif /* ! CONFIG_DYNAMIC_FTRACE */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-STD_ENTRY(ftrace_graph_caller)
-ftrace_graph_caller:
-#ifndef CONFIG_DYNAMIC_FTRACE
- MCOUNT_SAVE_REGS
-#endif
-
- /* arg1: Get the location of the parent's return address */
- addi r0, sp, 12 * REGSIZE
- /* arg2: Get self return address */
- move r1, lr
-
- jal prepare_ftrace_return
-
- MCOUNT_RESTORE_REGS
- RETURN_BACK
-STD_ENDPROC(ftrace_graph_caller)
-
- .global return_to_handler
-return_to_handler:
- MCOUNT_SAVE_REGS
-
- jal ftrace_return_to_handler
- /* restore the real parent address */
- move r11, r0
-
- MCOUNT_RESTORE_REGS
- jr r11
-
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
deleted file mode 100644
index 7475af3aacec..000000000000
--- a/arch/tile/kernel/messaging.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/percpu.h>
-#include <linux/smp.h>
-#include <linux/hardirq.h>
-#include <linux/ptrace.h>
-#include <asm/hv_driver.h>
-#include <asm/irq_regs.h>
-#include <asm/traps.h>
-#include <hv/hypervisor.h>
-#include <arch/interrupts.h>
-
-/* All messages are stored here */
-static DEFINE_PER_CPU(HV_MsgState, msg_state);
-
-void init_messaging(void)
-{
- /* Allocate storage for messages in kernel space */
- HV_MsgState *state = this_cpu_ptr(&msg_state);
- int rc = hv_register_message_state(state);
- if (rc != HV_OK)
- panic("hv_register_message_state: error %d", rc);
-
- /* Make sure downcall interrupts will be enabled. */
- arch_local_irq_unmask(INT_INTCTRL_K);
-}
-
-void hv_message_intr(struct pt_regs *regs, int intnum)
-{
- /*
- * We enter with interrupts disabled and leave them disabled,
- * to match expectations of called functions (e.g.
- * do_ccupdate_local() in mm/slab.c). This is also consistent
- * with normal call entry for device interrupts.
- */
-
- int message[HV_MAX_MESSAGE_SIZE/sizeof(int)];
- HV_RcvMsgInfo rmi;
- int nmsgs = 0;
-
- /* Track time spent here in an interrupt context */
- struct pt_regs *old_regs = set_irq_regs(regs);
- irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
- /* Debugging check for stack overflow: less than 1/8th stack free? */
- {
- long sp = stack_pointer - (long) current_thread_info();
- if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- pr_emerg("%s: stack overflow: %ld\n",
- __func__, sp - sizeof(struct thread_info));
- dump_stack();
- }
- }
-#endif
-
- while (1) {
- HV_MsgState *state = this_cpu_ptr(&msg_state);
- rmi = hv_receive_message(*state, (HV_VirtAddr) message,
- sizeof(message));
- if (rmi.msglen == 0)
- break;
-
- if (rmi.msglen < 0)
- panic("hv_receive_message failed: %d", rmi.msglen);
-
- ++nmsgs;
-
- if (rmi.source == HV_MSG_TILE) {
- int tag;
-
- /* we just send tags for now */
- BUG_ON(rmi.msglen != sizeof(int));
-
- tag = message[0];
-#ifdef CONFIG_SMP
- evaluate_message(message[0]);
-#else
- panic("Received IPI message %d in UP mode", tag);
-#endif
- } else if (rmi.source == HV_MSG_INTR) {
- HV_IntrMsg *him = (HV_IntrMsg *)message;
- struct hv_driver_cb *cb =
- (struct hv_driver_cb *)him->intarg;
- cb->callback(cb, him->intdata);
- __this_cpu_inc(irq_stat.irq_hv_msg_count);
- }
- }
-
- /*
- * We shouldn't have gotten a message downcall with no
- * messages available.
- */
- if (nmsgs == 0)
- panic("Message downcall invoked with no messages!");
-
- /*
- * Track time spent against the current process again and
- * process any softirqs if they are waiting.
- */
- irq_exit();
- set_irq_regs(old_regs);
-}
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
deleted file mode 100644
index 09233fbe7801..000000000000
--- a/arch/tile/kernel/module.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Based on i386 version, copyright (C) 2001 Rusty Russell.
- */
-
-#include <linux/moduleloader.h>
-#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <asm/pgtable.h>
-#include <asm/homecache.h>
-#include <arch/opcode.h>
-
-#ifdef MODULE_DEBUG
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt...)
-#endif
-
-/*
- * Allocate some address space in the range MEM_MODULE_START to
- * MEM_MODULE_END and populate it with memory.
- */
-void *module_alloc(unsigned long size)
-{
- struct page **pages;
- pgprot_t prot_rwx = __pgprot(_PAGE_KERNEL | _PAGE_KERNEL_EXEC);
- struct vm_struct *area;
- int i = 0;
- int npages;
-
- npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
- if (pages == NULL)
- return NULL;
- for (; i < npages; ++i) {
- pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- if (!pages[i])
- goto free_pages;
- }
-
- area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
- if (!area)
- goto free_pages;
- area->nr_pages = npages;
- area->pages = pages;
-
- if (map_vm_area(area, prot_rwx, pages)) {
- vunmap(area->addr);
- goto free_pages;
- }
-
- return area->addr;
- free_pages:
- while (--i >= 0)
- __free_page(pages[i]);
- kfree(pages);
- return NULL;
-}
-
-
-/* Free memory returned from module_alloc */
-void module_memfree(void *module_region)
-{
- vfree(module_region);
-
- /* Globally flush the L1 icache. */
- flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
- 0, 0, 0, NULL, NULL, 0);
-
- /*
- * FIXME: Add module_arch_freeing_init to trim exception
- * table entries.
- */
-}
-
-#ifdef __tilegx__
-/*
- * Validate that the high 16 bits of "value" is just the sign-extension of
- * the low 48 bits.
- */
-static int validate_hw2_last(long value, struct module *me)
-{
- if (((value << 16) >> 16) != value) {
- pr_warn("module %s: Out of range HW2_LAST value %#lx\n",
- me->name, value);
- return 0;
- }
- return 1;
-}
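
The shift round-trip works because an arithmetic right shift of a signed long sign-extends bit 47, so the comparison holds exactly when the value fits in 48 signed bits. A small standalone illustration (the left shift of an out-of-range value is technically implementation-defined in C; the kernel relies on gcc's wrap-and-arithmetic-shift behavior, as assumed here):

    #include <stdio.h>

    static int fits_48_signed(long value)
    {
            return ((value << 16) >> 16) == value;
    }

    int main(void)
    {
            printf("%d\n", fits_48_signed(0x00007fffffffffffL)); /* 1: 2^47 - 1 */
            printf("%d\n", fits_48_signed(0x0000800000000000L)); /* 0: 2^47 */
            printf("%d\n", fits_48_signed(-1L));                 /* 1 */
            return 0;
    }
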
-
-/*
- * Validate that "value" isn't too big to hold in a JumpOff relocation.
- */
-static int validate_jumpoff(long value)
-{
- /* Determine size of jump offset. */
- int shift = __builtin_clzl(get_JumpOff_X1(create_JumpOff_X1(-1)));
-
- /* Check to see if it fits into the relocation slot. */
- long f = get_JumpOff_X1(create_JumpOff_X1(value));
- f = (f << shift) >> shift;
-
- return f == value;
-}
-#endif
-
-int apply_relocate_add(Elf_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- unsigned int i;
- Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
- Elf_Sym *sym;
- u64 *location;
- unsigned long value;
-
- DEBUGP("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- /* This is where to make the change */
- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rel[i].r_offset;
- /*
- * This is the symbol it is referring to.
- * Note that all undefined symbols have been resolved.
- */
- sym = (Elf_Sym *)sechdrs[symindex].sh_addr
- + ELF_R_SYM(rel[i].r_info);
- value = sym->st_value + rel[i].r_addend;
-
- switch (ELF_R_TYPE(rel[i].r_info)) {
-
-#ifdef __LITTLE_ENDIAN
-# define MUNGE(func) \
- (*location = ((*location & ~func(-1)) | func(value)))
-#else
-/*
- * Instructions are always little-endian, so when we read them as data,
- * we have to swap them around before and after modifying them.
- */
-# define MUNGE(func) \
- (*location = swab64((swab64(*location) & ~func(-1)) | func(value)))
-#endif
-
-#ifndef __tilegx__
- case R_TILE_32:
- *(uint32_t *)location = value;
- break;
- case R_TILE_IMM16_X0_HA:
- value = (value + 0x8000) >> 16;
- /*FALLTHROUGH*/
- case R_TILE_IMM16_X0_LO:
- MUNGE(create_Imm16_X0);
- break;
- case R_TILE_IMM16_X1_HA:
- value = (value + 0x8000) >> 16;
- /*FALLTHROUGH*/
- case R_TILE_IMM16_X1_LO:
- MUNGE(create_Imm16_X1);
- break;
- case R_TILE_JOFFLONG_X1:
- value -= (unsigned long) location; /* pc-relative */
- value = (long) value >> 3; /* count by instrs */
- MUNGE(create_JOffLong_X1);
- break;
-#else
- case R_TILEGX_64:
- *location = value;
- break;
- case R_TILEGX_IMM16_X0_HW2_LAST:
- if (!validate_hw2_last(value, me))
- return -ENOEXEC;
- value >>= 16;
- /*FALLTHROUGH*/
- case R_TILEGX_IMM16_X0_HW1:
- value >>= 16;
- /*FALLTHROUGH*/
- case R_TILEGX_IMM16_X0_HW0:
- MUNGE(create_Imm16_X0);
- break;
- case R_TILEGX_IMM16_X1_HW2_LAST:
- if (!validate_hw2_last(value, me))
- return -ENOEXEC;
- value >>= 16;
- /*FALLTHROUGH*/
- case R_TILEGX_IMM16_X1_HW1:
- value >>= 16;
- /*FALLTHROUGH*/
- case R_TILEGX_IMM16_X1_HW0:
- MUNGE(create_Imm16_X1);
- break;
- case R_TILEGX_JUMPOFF_X1:
- value -= (unsigned long) location; /* pc-relative */
- value = (long) value >> 3; /* count by instrs */
- if (!validate_jumpoff(value)) {
- pr_warn("module %s: Out of range jump to %#llx at %#llx (%p)\n",
- me->name,
- sym->st_value + rel[i].r_addend,
- rel[i].r_offset, location);
- return -ENOEXEC;
- }
- MUNGE(create_JumpOff_X1);
- break;
-#endif
-
-#undef MUNGE
-
- default:
- pr_err("module %s: Unknown relocation: %d\n",
- me->name, (int) ELF_R_TYPE(rel[i].r_info));
- return -ENOEXEC;
- }
- }
- return 0;
-}
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
deleted file mode 100644
index 6a1efe5543fa..000000000000
--- a/arch/tile/kernel/pci-dma.c
+++ /dev/null
@@ -1,607 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <asm/tlbflush.h>
-#include <asm/homecache.h>
-
-/* Generic DMA mapping functions: */
-
-/*
- * Allocate what Linux calls "coherent" memory. On TILEPro this is
- * uncached memory; on TILE-Gx it is hash-for-home memory.
- */
-#ifdef __tilepro__
-#define PAGE_HOME_DMA PAGE_HOME_UNCACHED
-#else
-#define PAGE_HOME_DMA PAGE_HOME_HASH
-#endif
-
-static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- u64 dma_mask = (dev && dev->coherent_dma_mask) ?
- dev->coherent_dma_mask : DMA_BIT_MASK(32);
- int node = dev ? dev_to_node(dev) : 0;
- int order = get_order(size);
- struct page *pg;
- dma_addr_t addr;
-
- gfp |= __GFP_ZERO;
-
- /*
- * If the mask specifies that the memory be in the first 4 GB, then
- * we force the allocation to come from the DMA zone. We also
- * force the node to 0 since that's the only node where the DMA
- * zone isn't empty. If the mask size is smaller than 32 bits, we
- * may still not be able to guarantee a suitable memory address, in
- * which case we will return NULL. But such devices are uncommon.
- */
- if (dma_mask <= DMA_BIT_MASK(32)) {
- gfp |= GFP_DMA32;
- node = 0;
- }
-
- pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
- if (pg == NULL)
- return NULL;
-
- addr = page_to_phys(pg);
- if (addr + size > dma_mask) {
- __homecache_free_pages(pg, order);
- return NULL;
- }
-
- *dma_handle = addr;
-
- return page_address(pg);
-}
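
For reference, a driver reaches this allocator through the generic DMA API; a hedged sketch (the device pointer and ring size are illustrative):

    /* Allocate a coherent descriptor ring: hash-for-home on TILE-Gx,
     * uncached on TILEPro, zeroed via the __GFP_ZERO forced above. */
    static void *alloc_ring(struct device *dev, dma_addr_t *ring_dma)
    {
            return dma_alloc_coherent(dev, PAGE_SIZE, ring_dma,
                                      GFP_KERNEL);
    }
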
-
-/*
- * Free memory that was allocated with tile_dma_alloc_coherent.
- */
-static void tile_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- homecache_free_pages((unsigned long)vaddr, get_order(size));
-}
-
-/*
- * The map routines "map" the specified address range for DMA
- * accesses. The memory belongs to the device after this call is
- * issued, until it is unmapped with dma_unmap_single.
- *
- * We don't need to do any mapping, we just flush the address range
- * out of the cache and return a DMA address.
- *
- * The unmap routines do whatever is necessary before the processor
- * accesses the memory again, and must be called before the driver
- * touches the memory. We can get away with a cache invalidate if we
- * can count on nothing having been touched.
- */
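
A hedged sketch of the streaming pattern this comment describes, via the generic wrappers that land in the map/unmap routines below (the buffer and device are illustrative):

    static int send_buffer(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* Flushes/homes the range, then hands it to the device. */
            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;
            /* ... start the device DMA using "handle" ... */
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;       /* CPU may touch buf again */
    }
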
-
-/* Set up a single page for DMA access. */
-static void __dma_prep_page(struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- /*
- * Flush the page from cache if necessary.
- * On tilegx, data is delivered to hash-for-home L3; on tilepro,
- * data is delivered direct to memory.
- *
- * NOTE: If we were just doing DMA_TO_DEVICE we could optimize
- * this to be a "flush" not a "finv" and keep some of the
- * state in cache across the DMA operation, but it doesn't seem
- * worth creating the necessary flush_buffer_xxx() infrastructure.
- */
- int home = page_home(page);
- switch (home) {
- case PAGE_HOME_HASH:
-#ifdef __tilegx__
- return;
-#endif
- break;
- case PAGE_HOME_UNCACHED:
-#ifdef __tilepro__
- return;
-#endif
- break;
- case PAGE_HOME_IMMUTABLE:
- /* Should be going to the device only. */
- BUG_ON(direction == DMA_FROM_DEVICE ||
- direction == DMA_BIDIRECTIONAL);
- return;
- case PAGE_HOME_INCOHERENT:
- /* Incoherent anyway, so no need to work hard here. */
- return;
- default:
- BUG_ON(home < 0 || home >= NR_CPUS);
- break;
- }
- homecache_finv_page(page);
-
-#ifdef DEBUG_ALIGNMENT
- /* Warn if the region isn't cacheline aligned. */
- if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1)))
- pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n",
- PFN_PHYS(page_to_pfn(page)) + offset, size);
-#endif
-}
-
-/* Make the page ready to be read by the core. */
-static void __dma_complete_page(struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
-#ifdef __tilegx__
- switch (page_home(page)) {
- case PAGE_HOME_HASH:
- /* I/O device delivered data the way the cpu wanted it. */
- break;
- case PAGE_HOME_INCOHERENT:
- /* Incoherent anyway, so no need to work hard here. */
- break;
- case PAGE_HOME_IMMUTABLE:
- /* Extra read-only copies are not a problem. */
- break;
- default:
- /* Flush the bogus hash-for-home I/O entries to memory. */
- homecache_finv_map_page(page, PAGE_HOME_HASH);
- break;
- }
-#endif
-}
-
-static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
- unsigned long offset = dma_addr & (PAGE_SIZE - 1);
- size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));
-
- while (size != 0) {
- __dma_prep_page(page, offset, bytes, direction);
- size -= bytes;
- ++page;
- offset = 0;
- bytes = min((size_t)PAGE_SIZE, size);
- }
-}
-
-static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
- unsigned long offset = dma_addr & (PAGE_SIZE - 1);
- size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));
-
- while (size != 0) {
- __dma_complete_page(page, offset, bytes, direction);
- size -= bytes;
- ++page;
- offset = 0;
- bytes = min((size_t)PAGE_SIZE, size);
- }
-}
-
-static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
-
- WARN_ON(nents == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
- __dma_prep_pa_range(sg->dma_address, sg->length, direction);
- }
-
- return nents;
-}
-
-static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- for_each_sg(sglist, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
- __dma_complete_pa_range(sg->dma_address, sg->length,
- direction);
- }
-}
-
-static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- BUG_ON(offset + size > PAGE_SIZE);
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- __dma_prep_page(page, offset, size, direction);
-
- return page_to_pa(page) + offset;
-}
-
-static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- return;
-
- __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
- dma_address & (PAGE_SIZE - 1), size, direction);
-}
-
-static void tile_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- __dma_complete_pa_range(dma_handle, size, direction);
-}
-
-static void tile_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- __dma_prep_pa_range(dma_handle, size, direction);
-}
-
-static void tile_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nelems == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nelems, i) {
- dma_sync_single_for_cpu(dev, sg->dma_address,
- sg_dma_len(sg), direction);
- }
-}
-
-static void tile_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nelems == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nelems, i) {
- dma_sync_single_for_device(dev, sg->dma_address,
- sg_dma_len(sg), direction);
- }
-}
-
-static const struct dma_map_ops tile_default_dma_map_ops = {
- .alloc = tile_dma_alloc_coherent,
- .free = tile_dma_free_coherent,
- .map_page = tile_dma_map_page,
- .unmap_page = tile_dma_unmap_page,
- .map_sg = tile_dma_map_sg,
- .unmap_sg = tile_dma_unmap_sg,
- .sync_single_for_cpu = tile_dma_sync_single_for_cpu,
- .sync_single_for_device = tile_dma_sync_single_for_device,
- .sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
- .sync_sg_for_device = tile_dma_sync_sg_for_device,
-};
-
-const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
-EXPORT_SYMBOL(tile_dma_map_ops);
-
-/* Generic PCI DMA mapping functions */
-
-static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- int node = dev_to_node(dev);
- int order = get_order(size);
- struct page *pg;
- dma_addr_t addr;
-
- gfp |= __GFP_ZERO;
-
- pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
- if (pg == NULL)
- return NULL;
-
- addr = page_to_phys(pg);
-
- *dma_handle = addr + get_dma_offset(dev);
-
- return page_address(pg);
-}
-
-/*
- * Free memory that was allocated with tile_pci_dma_alloc_coherent.
- */
-static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- homecache_free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
-
- WARN_ON(nents == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
- __dma_prep_pa_range(sg->dma_address, sg->length, direction);
-
- sg->dma_address = sg->dma_address + get_dma_offset(dev);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- }
-
- return nents;
-}
-
-static void tile_pci_dma_unmap_sg(struct device *dev,
- struct scatterlist *sglist, int nents,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- for_each_sg(sglist, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
- __dma_complete_pa_range(sg->dma_address, sg->length,
- direction);
- }
-}
-
-static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- BUG_ON(offset + size > PAGE_SIZE);
- __dma_prep_page(page, offset, size, direction);
-
- return page_to_pa(page) + offset + get_dma_offset(dev);
-}
-
-static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- dma_address -= get_dma_offset(dev);
-
- __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
- dma_address & (PAGE_SIZE - 1), size, direction);
-}
-
-static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- dma_handle -= get_dma_offset(dev);
-
- __dma_complete_pa_range(dma_handle, size, direction);
-}
-
-static void tile_pci_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction
- direction)
-{
- dma_handle -= get_dma_offset(dev);
-
- __dma_prep_pa_range(dma_handle, size, direction);
-}
-
-static void tile_pci_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sglist,
- int nelems,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nelems == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nelems, i) {
- dma_sync_single_for_cpu(dev, sg->dma_address,
- sg_dma_len(sg), direction);
- }
-}
-
-static void tile_pci_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist,
- int nelems,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nelems == 0 || sglist->length == 0);
-
- for_each_sg(sglist, sg, nelems, i) {
- dma_sync_single_for_device(dev, sg->dma_address,
- sg_dma_len(sg), direction);
- }
-}
-
-static const struct dma_map_ops tile_pci_default_dma_map_ops = {
- .alloc = tile_pci_dma_alloc_coherent,
- .free = tile_pci_dma_free_coherent,
- .map_page = tile_pci_dma_map_page,
- .unmap_page = tile_pci_dma_unmap_page,
- .map_sg = tile_pci_dma_map_sg,
- .unmap_sg = tile_pci_dma_unmap_sg,
- .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
- .sync_single_for_device = tile_pci_dma_sync_single_for_device,
- .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
- .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
-};
-
-const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
-EXPORT_SYMBOL(gx_pci_dma_map_ops);
-
-/* PCI DMA mapping functions for legacy PCI devices */
-
-#ifdef CONFIG_SWIOTLB
-static const struct dma_map_ops pci_hybrid_dma_ops = {
- .alloc = swiotlb_alloc,
- .free = swiotlb_free,
- .map_page = tile_pci_dma_map_page,
- .unmap_page = tile_pci_dma_unmap_page,
- .map_sg = tile_pci_dma_map_sg,
- .unmap_sg = tile_pci_dma_unmap_sg,
- .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
- .sync_single_for_device = tile_pci_dma_sync_single_for_device,
- .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
- .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
-};
-
-const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &swiotlb_dma_ops;
-const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
-#else
-const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
-#endif
-EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
-EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
-
-int dma_set_mask(struct device *dev, u64 mask)
-{
- const struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- /*
- * For PCI devices with 64-bit DMA addressing capability, promote
- * the dma_ops to hybrid, with the consistent memory DMA space limited
- * to 32-bit. For 32-bit capable devices, limit the streaming DMA
- * address range to max_direct_dma_addr.
- */
- if (dma_ops == gx_pci_dma_map_ops ||
- dma_ops == gx_hybrid_pci_dma_map_ops ||
- dma_ops == gx_legacy_pci_dma_map_ops) {
- if (mask == DMA_BIT_MASK(64) &&
- dma_ops == gx_legacy_pci_dma_map_ops)
- set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
- else if (mask > dev->archdata.max_direct_dma_addr)
- mask = dev->archdata.max_direct_dma_addr;
- }
-
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
-EXPORT_SYMBOL(dma_set_mask);
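
A typical probe-time negotiation that exercises the promotion logic above; a hedged sketch where pdev is the driver's PCI device:

    /* Asking for 64-bit DMA promotes a legacy device to the hybrid
     * ops; fall back to 32-bit if the device can't manage it. */
    if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
        dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
            return -EIO;
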
-
-#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
-int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
- const struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- /*
- * For PCI devices with 64-bit DMA addressing capability, promote
- * the dma_ops to full capability for both streams and consistent
- * memory access. For 32-bit capable devices, limit the consistent
- * memory DMA range to max_direct_dma_addr.
- */
- if (dma_ops == gx_pci_dma_map_ops ||
- dma_ops == gx_hybrid_pci_dma_map_ops ||
- dma_ops == gx_legacy_pci_dma_map_ops) {
- if (mask == DMA_BIT_MASK(64))
- set_dma_ops(dev, gx_pci_dma_map_ops);
- else if (mask > dev->archdata.max_direct_dma_addr)
- mask = dev->archdata.max_direct_dma_addr;
- }
-
- if (!dma_supported(dev, mask))
- return -EIO;
- dev->coherent_dma_mask = mask;
- return 0;
-}
-EXPORT_SYMBOL(dma_set_coherent_mask);
-#endif
-
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
-/*
- * The generic dma_get_required_mask() uses the highest physical address
- * (max_pfn) to hint to PCI drivers whether to use a 32-bit or 64-bit
- * DMA configuration. Since TILE-Gx has an I/O TLB/MMU that lets DMA
- * use the full 64-bit PCI address space without being limited by the
- * physical memory space, we always let PCI devices use 64-bit DMA if
- * they have that capability, by returning the 64-bit DMA mask here.
- * The device driver still has the option to use 32-bit DMA if the
- * device is not capable of 64-bit DMA.
- */
-u64 dma_get_required_mask(struct device *dev)
-{
- return DMA_BIT_MASK(64);
-}
-EXPORT_SYMBOL_GPL(dma_get_required_mask);
-#endif
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
deleted file mode 100644
index bbf81579b1f8..000000000000
--- a/arch/tile/kernel/pci.c
+++ /dev/null
@@ -1,592 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/capability.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/export.h>
-
-#include <asm/processor.h>
-#include <asm/sections.h>
-#include <asm/byteorder.h>
-#include <asm/hv_driver.h>
-#include <hv/drv_pcie_rc_intf.h>
-
-
-/*
- * Initialization flow and process
- * -------------------------------
- *
- * This file contains the routines to search for PCI buses,
- * enumerate the buses, and configure any attached devices.
- *
- * There are two entry points here:
- * 1) tile_pci_init
- * This sets up the pci_controller structs, and opens the
- * FDs to the hypervisor. This is called from setup_arch() early
- * in the boot process.
- * 2) pcibios_init
- * This probes the PCI bus(es) for any attached hardware. It's
- * called by subsys_initcall. All of the real work is done by the
- * generic Linux PCI layer.
- *
- */
-
-static int pci_probe = 1;
-
-/*
- * This flag tells whether the platform is TILEmpower, which needs
- * special configuration for the PLX switch chip.
- */
-int __ro_after_init tile_plx_gen1;
-
-static struct pci_controller controllers[TILE_NUM_PCIE];
-static int num_controllers;
-static int pci_scan_flags[TILE_NUM_PCIE];
-
-static struct pci_ops tile_cfg_ops;
-
-
-/*
- * Open a FD to the hypervisor PCI device.
- *
- * controller_id is the controller number; config_type is 0 or 1 for
- * config0 or config1 operations.
- */
-static int tile_pcie_open(int controller_id, int config_type)
-{
- char filename[32];
- int fd;
-
- sprintf(filename, "pcie/%d/config%d", controller_id, config_type);
-
- fd = hv_dev_open((HV_VirtAddr)filename, 0);
-
- return fd;
-}
-
-
-/*
- * Get the IRQ numbers from the HV and set up the handlers for them.
- */
-static int tile_init_irqs(int controller_id, struct pci_controller *controller)
-{
- char filename[32];
- int fd;
- int ret;
- int x;
- struct pcie_rc_config rc_config;
-
- sprintf(filename, "pcie/%d/ctl", controller_id);
- fd = hv_dev_open((HV_VirtAddr)filename, 0);
- if (fd < 0) {
- pr_err("PCI: hv_dev_open(%s) failed\n", filename);
- return -1;
- }
- ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
- sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
- hv_dev_close(fd);
- if (ret != sizeof(rc_config)) {
- pr_err("PCI: wanted %zd bytes, got %d\n",
- sizeof(rc_config), ret);
- return -1;
- }
- /* Record irq_base so that we can map INTx to IRQ # later. */
- controller->irq_base = rc_config.intr;
-
- for (x = 0; x < 4; x++)
- tile_irq_activate(rc_config.intr + x,
- TILE_IRQ_HW_CLEAR);
-
- if (rc_config.plx_gen1)
- controller->plx_gen1 = 1;
-
- return 0;
-}
-
-/*
- * First initialization entry point, called from setup_arch().
- *
- * Find valid controllers and fill in pci_controller structs for each
- * of them.
- *
- * Returns the number of controllers discovered.
- */
-int __init tile_pci_init(void)
-{
- int i;
-
- if (!pci_probe) {
- pr_info("PCI: disabled by boot argument\n");
- return 0;
- }
-
- pr_info("PCI: Searching for controllers...\n");
-
- /* Re-init number of PCIe controllers to support hot-plug feature. */
- num_controllers = 0;
-
- /* Do any configuration we need before using the PCIe */
-
- for (i = 0; i < TILE_NUM_PCIE; i++) {
- /*
- * Check whether we need a real config op, based on the
- * results of pcibios_init(), to support PCIe hot-plug.
- */
- if (pci_scan_flags[i] == 0) {
- int hv_cfg_fd0 = -1;
- int hv_cfg_fd1 = -1;
- int hv_mem_fd = -1;
- char name[32];
- struct pci_controller *controller;
-
- /*
- * Open the fd to the HV. If it fails then this
- * device doesn't exist.
- */
- hv_cfg_fd0 = tile_pcie_open(i, 0);
- if (hv_cfg_fd0 < 0)
- continue;
- hv_cfg_fd1 = tile_pcie_open(i, 1);
- if (hv_cfg_fd1 < 0) {
- pr_err("PCI: Couldn't open config fd to HV for controller %d\n",
- i);
- goto err_cont;
- }
-
- sprintf(name, "pcie/%d/mem", i);
- hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
- if (hv_mem_fd < 0) {
- pr_err("PCI: Could not open mem fd to HV!\n");
- goto err_cont;
- }
-
- pr_info("PCI: Found PCI controller #%d\n", i);
-
- controller = &controllers[i];
-
- controller->index = i;
- controller->hv_cfg_fd[0] = hv_cfg_fd0;
- controller->hv_cfg_fd[1] = hv_cfg_fd1;
- controller->hv_mem_fd = hv_mem_fd;
- controller->last_busno = 0xff;
- controller->ops = &tile_cfg_ops;
-
- num_controllers++;
- continue;
-
-err_cont:
- if (hv_cfg_fd0 >= 0)
- hv_dev_close(hv_cfg_fd0);
- if (hv_cfg_fd1 >= 0)
- hv_dev_close(hv_cfg_fd1);
- if (hv_mem_fd >= 0)
- hv_dev_close(hv_mem_fd);
- continue;
- }
- }
-
- /*
- * Before using the PCIe, see if we need to do any platform-specific
- * configuration, such as the PLX switch Gen 1 issue on TILEmpower.
- */
- for (i = 0; i < num_controllers; i++) {
- struct pci_controller *controller = &controllers[i];
-
- if (controller->plx_gen1)
- tile_plx_gen1 = 1;
- }
-
- return num_controllers;
-}
-
-/*
- * (pin - 1) converts from the PCI standard's [1:4] convention to
- * a normal [0:3] range, so INTA (pin 1) maps to irq_base + 0 and
- * INTD (pin 4) to irq_base + 3.
- */
-static int tile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- struct pci_controller *controller =
- (struct pci_controller *)dev->sysdata;
- return (pin - 1) + controller->irq_base;
-}
-
-
-static void fixup_read_and_payload_sizes(void)
-{
- struct pci_dev *dev = NULL;
- int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
- int max_read_size = PCI_EXP_DEVCTL_READRQ_512B;
- u16 new_values;
-
- /* Scan for the smallest maximum payload size. */
- for_each_pci_dev(dev) {
- if (!pci_is_pcie(dev))
- continue;
-
- if (dev->pcie_mpss < smallest_max_payload)
- smallest_max_payload = dev->pcie_mpss;
- }
-
- /* Now, set the max_payload_size for all devices to that value. */
- new_values = max_read_size | (smallest_max_payload << 5);
- for_each_pci_dev(dev)
- pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
- new_values);
-}
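As a worked example of the DEVCTL encoding above (PCIe encodes both sizes as 128 << code): a smallest_max_payload of 1 selects a 256-byte payload in bits 7:5, and PCI_EXP_DEVCTL_READRQ_512B (0x2000) selects a 512-byte read request in bits 14:12, so:

	new_values = 0x2000 | (1 << 5);		/* == 0x2020 */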
-
-
-/*
- * Second PCI initialization entry point, called by subsys_initcall.
- *
- * The controllers have been set up by the time we get here, by a call to
- * tile_pci_init.
- */
-int __init pcibios_init(void)
-{
- struct pci_host_bridge *bridge;
- int i;
-
- pr_info("PCI: Probing PCI hardware\n");
-
- /*
- * Delay a bit in case devices aren't ready. Some devices are
- * known to require at least 20ms here, but we use a more
- * conservative value.
- */
- msleep(250);
-
- /* Scan all of the recorded PCI controllers. */
- for (i = 0; i < TILE_NUM_PCIE; i++) {
-		/*
-		 * Do the real pcibios init ops only if the controller was
-		 * initialized successfully by tile_pci_init() and has not
-		 * yet been initialized by pcibios_init(), to support PCIe
-		 * hot-plug.
-		 */
- if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
- struct pci_controller *controller = &controllers[i];
- struct pci_bus *bus;
- LIST_HEAD(resources);
-
- if (tile_init_irqs(i, controller)) {
- pr_err("PCI: Could not initialize IRQs\n");
- continue;
- }
-
- pr_info("PCI: initializing controller #%d\n", i);
-
- pci_add_resource(&resources, &ioport_resource);
- pci_add_resource(&resources, &iomem_resource);
-
- bridge = pci_alloc_host_bridge(0);
- if (!bridge)
- break;
-
- list_splice_init(&resources, &bridge->windows);
- bridge->dev.parent = NULL;
- bridge->sysdata = controller;
- bridge->busnr = 0;
- bridge->ops = controller->ops;
- bridge->swizzle_irq = pci_common_swizzle;
- bridge->map_irq = tile_map_irq;
-
- pci_scan_root_bus_bridge(bridge);
- bus = bridge->bus;
- controller->root_bus = bus;
- controller->last_busno = bus->busn_res.end;
- }
- }
-
- /*
- * This comes from the generic Linux PCI driver.
- *
- * It allocates all of the resources (I/O memory, etc)
- * associated with the devices read in above.
- */
- pci_assign_unassigned_resources();
-
- /* Configure the max_read_size and max_payload_size values. */
- fixup_read_and_payload_sizes();
-
- /* Record the I/O resources in the PCI controller structure. */
- for (i = 0; i < TILE_NUM_PCIE; i++) {
-		/*
-		 * Do the real pcibios init ops only if the controller was
-		 * initialized successfully by tile_pci_init() and has not
-		 * yet been initialized by pcibios_init(), to support PCIe
-		 * hot-plug.
-		 */
- if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
- struct pci_bus *root_bus = controllers[i].root_bus;
- struct pci_bus *next_bus;
- struct pci_dev *dev;
-
- pci_bus_add_devices(root_bus);
-
- list_for_each_entry(dev, &root_bus->devices, bus_list) {
- /*
- * Find the PCI host controller, ie. the 1st
- * bridge.
- */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
- (PCI_SLOT(dev->devfn) == 0)) {
- next_bus = dev->subordinate;
- controllers[i].mem_resources[0] =
- *next_bus->resource[0];
- controllers[i].mem_resources[1] =
- *next_bus->resource[1];
- controllers[i].mem_resources[2] =
- *next_bus->resource[2];
-
- /* Setup flags. */
- pci_scan_flags[i] = 1;
-
- break;
- }
- }
- }
- }
-
- return 0;
-}
-subsys_initcall(pcibios_init);
-
-void pcibios_set_master(struct pci_dev *dev)
-{
- /* No special bus mastering setup handling. */
-}
-
-/* Process any "pci=" kernel boot arguments. */
-char *__init pcibios_setup(char *str)
-{
- if (!strcmp(str, "off")) {
- pci_probe = 0;
- return NULL;
- }
- return str;
-}
-
-/*
- * Enable memory and/or address decoding, as appropriate, for the
- * device described by the 'dev' struct.
- *
- * This is called from the generic PCI layer, and can be called
- * for bridges or endpoints.
- */
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- u16 cmd, old_cmd;
- u8 header_type;
- int i;
- struct resource *r;
-
- pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- old_cmd = cmd;
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
-		/*
-		 * For bridges, we enable both memory and I/O decoding
-		 * in all cases.
-		 */
- cmd |= PCI_COMMAND_IO;
- cmd |= PCI_COMMAND_MEMORY;
- } else {
-		/*
-		 * For endpoints, we enable memory and/or I/O decoding
-		 * only if they have a resource of that type.
-		 */
- for (i = 0; i < 6; i++) {
- r = &dev->resource[i];
- if (r->flags & IORESOURCE_UNSET) {
- pr_err("PCI: Device %s not available because of resource collisions\n",
- pci_name(dev));
- return -EINVAL;
- }
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- }
-
- /*
- * We only write the command if it changed.
- */
- if (cmd != old_cmd)
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- return 0;
-}
-
-/****************************************************************
- *
- * Tile PCI config space read/write routines
- *
- ****************************************************************/
-
-/*
- * These are the normal read and write ops; they are expanded with
- * macros from pci_bus_read_config_byte() etc.
- *
- * devfn is the combined PCI slot & function.
- *
- * offset is in bytes, from the start of config space for the
- * specified bus & slot.
- */
-
-static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
- int size, u32 *val)
-{
- struct pci_controller *controller = bus->sysdata;
- int busnum = bus->number & 0xff;
- int slot = (devfn >> 3) & 0x1f;
- int function = devfn & 0x7;
- u32 addr;
- int config_mode = 1;
-
- /*
- * There is no bridge between the Tile and bus 0, so we
- * use config0 to talk to bus 0.
- *
- * If we're talking to a bus other than zero then we
- * must have found a bridge.
- */
- if (busnum == 0) {
- /*
- * We fake an empty slot for (busnum == 0) && (slot > 0),
- * since there is only one slot on bus 0.
- */
- if (slot) {
- *val = 0xFFFFFFFF;
- return 0;
- }
- config_mode = 0;
- }
-
- addr = busnum << 20; /* Bus in 27:20 */
- addr |= slot << 15; /* Slot (device) in 19:15 */
-	addr |= function << 12;		/* Function in 14:12 */
-	addr |= (offset & 0xFFF);	/* Byte address in 11:0 */
-
- return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
- (HV_VirtAddr)(val), size, addr);
-}
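A minimal standalone sketch of the config-address encoding used above; the helper name and example values are illustrative, not part of the driver:

static u32 tile_cfg_addr(int bus, int dev, int fn, int offset)
{
	return (bus << 20) |		/* Bus in 27:20 */
	       (dev << 15) |		/* Slot (device) in 19:15 */
	       (fn << 12) |		/* Function in 14:12 */
	       (offset & 0xFFF);	/* Byte address in 11:0 */
}

/* Example: bus 1, device 2, function 0, offset 0x10 encodes to 0x110010. */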
-
-
-/*
- * See tile_cfg_read() for relevant comments.
- * Note that "val" is the value to write, not a pointer to that value.
- */
-static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
- int size, u32 val)
-{
- struct pci_controller *controller = bus->sysdata;
- int busnum = bus->number & 0xff;
- int slot = (devfn >> 3) & 0x1f;
- int function = devfn & 0x7;
- u32 addr;
- int config_mode = 1;
- HV_VirtAddr valp = (HV_VirtAddr)&val;
-
- /*
- * For bus 0 slot 0 we use config 0 accesses.
- */
- if (busnum == 0) {
- /*
- * We fake an empty slot for (busnum == 0) && (slot > 0),
- * since there is only one slot on bus 0.
- */
- if (slot)
- return 0;
- config_mode = 0;
- }
-
- addr = busnum << 20; /* Bus in 27:20 */
- addr |= slot << 15; /* Slot (device) in 19:15 */
-	addr |= function << 12;		/* Function in 14:12 */
-	addr |= (offset & 0xFFF);	/* Byte address in 11:0 */
-
-#ifdef __BIG_ENDIAN
- /* Point to the correct part of the 32-bit "val". */
- valp += 4 - size;
-#endif
-
- return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
- valp, size, addr);
-}
-
-
-static struct pci_ops tile_cfg_ops = {
- .read = tile_cfg_read,
- .write = tile_cfg_write,
-};
-
-
-/*
- * In the following, each PCI controller's mem_resources[1]
- * represents its (non-prefetchable) PCI memory resource.
- * mem_resources[0] and mem_resources[2] refer to its PCI I/O and
- * prefetchable PCI memory resources, respectively.
- * For more details, see pci_setup_bridge() in setup-bus.c.
- * By comparing the target PCI memory address against the
- * end address of controller 0, we can determine the controller
- * that should accept the PCI memory access.
- */
-#define TILE_READ(size, type) \
-type _tile_read##size(unsigned long addr) \
-{ \
- type val; \
- int idx = 0; \
- if (addr > controllers[0].mem_resources[1].end && \
- addr > controllers[0].mem_resources[2].end) \
- idx = 1; \
- if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \
- (HV_VirtAddr)(&val), sizeof(type), addr)) \
- pr_err("PCI: read %zd bytes at 0x%lX failed\n", \
- sizeof(type), addr); \
- return val; \
-} \
-EXPORT_SYMBOL(_tile_read##size)
-
-TILE_READ(b, u8);
-TILE_READ(w, u16);
-TILE_READ(l, u32);
-TILE_READ(q, u64);
-
-#define TILE_WRITE(size, type) \
-void _tile_write##size(type val, unsigned long addr) \
-{ \
- int idx = 0; \
- if (addr > controllers[0].mem_resources[1].end && \
- addr > controllers[0].mem_resources[2].end) \
- idx = 1; \
- if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \
- (HV_VirtAddr)(&val), sizeof(type), addr)) \
- pr_err("PCI: write %zd bytes at 0x%lX failed\n", \
- sizeof(type), addr); \
-} \
-EXPORT_SYMBOL(_tile_write##size)
-
-TILE_WRITE(b, u8);
-TILE_WRITE(w, u16);
-TILE_WRITE(l, u32);
-TILE_WRITE(q, u64);
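The macros above expand to _tile_read{b,w,l,q}() and _tile_write{b,w,l,q}(). A hypothetical usage sketch, assuming bar_addr is a bus address inside one of the controllers' memory windows:

	u32 status = _tile_readl(bar_addr);	/* from TILE_READ(l, u32) */
	_tile_writel(status | 0x1, bar_addr);	/* from TILE_WRITE(l, u32) */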
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
deleted file mode 100644
index 9aa238ac7b35..000000000000
--- a/arch/tile/kernel/pci_gx.c
+++ /dev/null
@@ -1,1592 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/mmzone.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/capability.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/irq.h>
-#include <linux/msi.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/ctype.h>
-
-#include <asm/processor.h>
-#include <asm/sections.h>
-#include <asm/byteorder.h>
-
-#include <gxio/iorpc_globals.h>
-#include <gxio/kiorpc.h>
-#include <gxio/trio.h>
-#include <gxio/iorpc_trio.h>
-#include <hv/drv_trio_intf.h>
-
-#include <arch/sim.h>
-
-/*
- * This file contains the routines to search for PCI buses,
- * enumerate the buses, and configure any attached devices.
- */
-
-#define DEBUG_PCI_CFG 0
-
-#if DEBUG_PCI_CFG
-#define TRACE_CFG_WR(size, val, bus, dev, func, offset) \
- pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \
- size, val, bus, dev, func, offset & 0xFFF);
-#define TRACE_CFG_RD(size, val, bus, dev, func, offset) \
- pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \
- size, val, bus, dev, func, offset & 0xFFF);
-#else
-#define TRACE_CFG_WR(...)
-#define TRACE_CFG_RD(...)
-#endif
-
-static int pci_probe = 1;
-
-/* Information on the PCIe RC ports configuration. */
-static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
-
-/*
- * On some platforms with one or more Gx endpoint ports, we need to
- * delay the PCIe RC port probe for a few seconds to work around
- * a HW PCIe link-training bug. The exact delay is specified with
- * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
- * where T is the TRIO instance number, P is the port number and S is
- * the delay in seconds. If the argument is specified, but the delay is
- * not provided, the value will be DEFAULT_RC_DELAY.
- */
-static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
-
-/* Default number of seconds that the PCIe RC port probe can be delayed. */
-#define DEFAULT_RC_DELAY 10
-
-/* The PCI I/O space size in each PCI domain. */
-#define IO_SPACE_SIZE 0x10000
-
-/* Provide shorter versions of some very long constant names. */
-#define AUTO_CONFIG_RC \
- TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC
-#define AUTO_CONFIG_RC_G1 \
- TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1
-#define AUTO_CONFIG_EP \
- TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT
-#define AUTO_CONFIG_EP_G1 \
- TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1
-
-/* Array of the PCIe ports configuration info obtained from the BIB. */
-struct pcie_trio_ports_property pcie_ports[TILEGX_NUM_TRIO];
-
-/* Number of configured TRIO instances. */
-int num_trio_shims;
-
-/* All drivers share the TRIO contexts defined here. */
-gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
-
-/* Pointer to an array of PCIe RC controllers. */
-struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
-int num_rc_controllers;
-
-static struct pci_ops tile_cfg_ops;
-
-/* Mask of CPUs that should receive PCIe interrupts. */
-static struct cpumask intr_cpus_map;
-
-/*
- * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
- * For now, we simply send interrupts to non-dataplane CPUs.
- * We may implement methods to allow the user to specify the target CPUs,
- * e.g. via boot arguments.
- */
-static int tile_irq_cpu(int irq)
-{
- unsigned int count;
- int i = 0;
- int cpu;
-
- count = cpumask_weight(&intr_cpus_map);
- if (unlikely(count == 0)) {
- pr_warn("intr_cpus_map empty, interrupts will be delivered to dataplane tiles\n");
- return irq % (smp_height * smp_width);
- }
-
- count = irq % count;
- for_each_cpu(cpu, &intr_cpus_map) {
- if (i++ == count)
- break;
- }
- return cpu;
-}
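Worked example (hypothetical values): with 4 CPUs in intr_cpus_map and irq == 45, count becomes 45 % 4 == 1 and the loop stops at the second CPU in the mask, so successive IRQs are spread round-robin across the non-dataplane tiles.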
-
-/* Open a file descriptor to the TRIO shim. */
-static int tile_pcie_open(int trio_index)
-{
- gxio_trio_context_t *context = &trio_contexts[trio_index];
- int ret;
- int mac;
-
- /* This opens a file descriptor to the TRIO shim. */
- ret = gxio_trio_init(context, trio_index);
- if (ret < 0)
- goto gxio_trio_init_failure;
-
- /* Allocate an ASID for the kernel. */
- ret = gxio_trio_alloc_asids(context, 1, 0, 0);
- if (ret < 0) {
- pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
- trio_index);
- goto asid_alloc_failure;
- }
-
- context->asid = ret;
-
-#ifdef USE_SHARED_PCIE_CONFIG_REGION
- /*
- * Alloc a PIO region for config access, shared by all MACs per TRIO.
- * This shouldn't fail since the kernel is supposed to be the first
- * client of the TRIO's PIO regions.
- */
- ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0);
- if (ret < 0) {
- pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n",
- trio_index);
- goto pio_alloc_failure;
- }
-
- context->pio_cfg_index = ret;
-
- /*
- * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter
- * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR.
- */
- ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index,
- 0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
- if (ret < 0) {
- pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n",
- trio_index);
- goto pio_alloc_failure;
- }
-#endif
-
- /* Get the properties of the PCIe ports on this TRIO instance. */
- ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
- if (ret < 0) {
- pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n",
- ret, trio_index);
- goto get_port_property_failure;
- }
-
- context->mmio_base_mac =
- iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
- if (context->mmio_base_mac == NULL) {
- pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n",
- ret, trio_index);
- ret = -ENOMEM;
-
- goto trio_mmio_mapping_failure;
- }
-
- /* Check the port strap state which will override the BIB setting. */
- for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) {
- TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
- unsigned int reg_offset;
-
- /* Ignore ports that are not specified in the BIB. */
- if (!pcie_ports[trio_index].ports[mac].allow_rc &&
- !pcie_ports[trio_index].ports[mac].allow_ep)
- continue;
-
- reg_offset =
- (TRIO_PCIE_INTFC_PORT_CONFIG <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- port_config.word =
- __gxio_mmio_read(context->mmio_base_mac + reg_offset);
-
- if (port_config.strap_state != AUTO_CONFIG_RC &&
- port_config.strap_state != AUTO_CONFIG_RC_G1) {
- /*
- * If this is really intended to be an EP port, record
- * it so that the endpoint driver will know about it.
- */
- if (port_config.strap_state == AUTO_CONFIG_EP ||
- port_config.strap_state == AUTO_CONFIG_EP_G1)
- pcie_ports[trio_index].ports[mac].allow_ep = 1;
- }
- }
-
- return ret;
-
-trio_mmio_mapping_failure:
-get_port_property_failure:
-asid_alloc_failure:
-#ifdef USE_SHARED_PCIE_CONFIG_REGION
-pio_alloc_failure:
-#endif
- hv_dev_close(context->fd);
-gxio_trio_init_failure:
- context->fd = -1;
-
- return ret;
-}
-
-static int __init tile_trio_init(void)
-{
- int i;
-
- /* We loop over all the TRIO shims. */
- for (i = 0; i < TILEGX_NUM_TRIO; i++) {
- if (tile_pcie_open(i) < 0)
- continue;
- num_trio_shims++;
- }
-
- return 0;
-}
-postcore_initcall(tile_trio_init);
-
-static void tilegx_legacy_irq_ack(struct irq_data *d)
-{
- __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
-}
-
-static void tilegx_legacy_irq_mask(struct irq_data *d)
-{
- __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
-}
-
-static void tilegx_legacy_irq_unmask(struct irq_data *d)
-{
- __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
-}
-
-static struct irq_chip tilegx_legacy_irq_chip = {
- .name = "tilegx_legacy_irq",
- .irq_ack = tilegx_legacy_irq_ack,
- .irq_mask = tilegx_legacy_irq_mask,
- .irq_unmask = tilegx_legacy_irq_unmask,
-
- /* TBD: support set_affinity. */
-};
-
-/*
- * This is a wrapper around the kernel's level-triggered interrupt
- * handler handle_level_irq() for PCI legacy interrupts. The TRIO
- * is configured such that only INTx Assert interrupts are proxied
- * to Linux, which just calls handle_level_irq() after clearing the
- * MAC INTx Assert status bit associated with this interrupt.
- */
-static void trio_handle_level_irq(struct irq_desc *desc)
-{
- struct pci_controller *controller = irq_desc_get_handler_data(desc);
- gxio_trio_context_t *trio_context = controller->trio;
- uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
- int mac = controller->mac;
- unsigned int reg_offset;
- uint64_t level_mask;
-
- handle_level_irq(desc);
-
- /*
- * Clear the INTx Level status, otherwise future interrupts are
- * not sent.
- */
- reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
-		TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx;
-
- __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask);
-}
-
-/*
- * Create kernel irqs and set up the handlers for the legacy interrupts.
- * Also some minimum initialization for the MSI support.
- */
-static int tile_init_irqs(struct pci_controller *controller)
-{
- int i;
- int j;
- int irq;
- int result;
-
- cpumask_copy(&intr_cpus_map, cpu_online_mask);
-
- for (i = 0; i < 4; i++) {
- gxio_trio_context_t *context = controller->trio;
- int cpu;
-
- /* Ask the kernel to allocate an IRQ. */
- irq = irq_alloc_hwirq(-1);
- if (!irq) {
- pr_err("PCI: no free irq vectors, failed for %d\n", i);
- goto free_irqs;
- }
- controller->irq_intx_table[i] = irq;
-
- /* Distribute the 4 IRQs to different tiles. */
- cpu = tile_irq_cpu(irq);
-
- /* Configure the TRIO intr binding for this IRQ. */
- result = gxio_trio_config_legacy_intr(context, cpu_x(cpu),
- cpu_y(cpu), KERNEL_PL,
- irq, controller->mac, i);
- if (result < 0) {
- pr_err("PCI: MAC intx config failed for %d\n", i);
-
- goto free_irqs;
- }
-
- /* Register the IRQ handler with the kernel. */
- irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
- trio_handle_level_irq);
- irq_set_chip_data(irq, (void *)(uint64_t)i);
- irq_set_handler_data(irq, controller);
- }
-
- return 0;
-
-free_irqs:
- for (j = 0; j < i; j++)
- irq_free_hwirq(controller->irq_intx_table[j]);
-
- return -1;
-}
-
-/*
- * Return 1 if the port is strapped to operate in RC mode.
- */
-static int
-strapped_for_rc(gxio_trio_context_t *trio_context, int mac)
-{
- TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
- unsigned int reg_offset;
-
- /* Check the port configuration. */
- reg_offset =
- (TRIO_PCIE_INTFC_PORT_CONFIG <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
- port_config.word =
- __gxio_mmio_read(trio_context->mmio_base_mac + reg_offset);
-
- if (port_config.strap_state == AUTO_CONFIG_RC ||
- port_config.strap_state == AUTO_CONFIG_RC_G1)
- return 1;
- else
- return 0;
-}
-
-/*
- * Find valid controllers and fill in pci_controller structs for each
- * of them.
- *
- * Return the number of controllers discovered.
- */
-int __init tile_pci_init(void)
-{
- int ctl_index = 0;
- int i, j;
-
- if (!pci_probe) {
- pr_info("PCI: disabled by boot argument\n");
- return 0;
- }
-
- pr_info("PCI: Searching for controllers...\n");
-
- if (num_trio_shims == 0 || sim_is_simulator())
- return 0;
-
- /*
- * Now determine which PCIe ports are configured to operate in RC
- * mode. There is a difference in the port configuration capability
- * between the Gx36 and Gx72 devices.
- *
- * The Gx36 has configuration capability for each of the 3 PCIe
- * interfaces (disable, auto endpoint, auto RC, etc.).
- * On the Gx72, you can only select one of the 3 PCIe interfaces per
- * TRIO to train automatically. Further, the allowable training modes
- * are reduced to four options (auto endpoint, auto RC, stream x1,
- * stream x4).
- *
- * For Gx36 ports, it must be allowed to be in RC mode by the
- * Board Information Block, and the hardware strapping pins must be
- * set to RC mode.
- *
- * For Gx72 ports, the port will operate in RC mode if either of the
- * following is true:
- * 1. It is allowed to be in RC mode by the Board Information Block,
- * and the BIB doesn't allow the EP mode.
- * 2. It is allowed to be in either the RC or the EP mode by the BIB,
- * and the hardware strapping pin is set to RC mode.
- */
- for (i = 0; i < TILEGX_NUM_TRIO; i++) {
- gxio_trio_context_t *context = &trio_contexts[i];
-
- if (context->fd < 0)
- continue;
-
- for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
- int is_rc = 0;
-
- if (pcie_ports[i].is_gx72 &&
- pcie_ports[i].ports[j].allow_rc) {
- if (!pcie_ports[i].ports[j].allow_ep ||
- strapped_for_rc(context, j))
- is_rc = 1;
- } else if (pcie_ports[i].ports[j].allow_rc &&
- strapped_for_rc(context, j)) {
- is_rc = 1;
- }
- if (is_rc) {
- pcie_rc[i][j] = 1;
- num_rc_controllers++;
- }
- }
- }
-
- /* Return if no PCIe ports are configured to operate in RC mode. */
- if (num_rc_controllers == 0)
- return 0;
-
- /* Set the TRIO pointer and MAC index for each PCIe RC port. */
- for (i = 0; i < TILEGX_NUM_TRIO; i++) {
- for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
- if (pcie_rc[i][j]) {
- pci_controllers[ctl_index].trio =
- &trio_contexts[i];
- pci_controllers[ctl_index].mac = j;
- pci_controllers[ctl_index].trio_index = i;
- ctl_index++;
- if (ctl_index == num_rc_controllers)
- goto out;
- }
- }
- }
-
-out:
- /* Configure each PCIe RC port. */
- for (i = 0; i < num_rc_controllers; i++) {
-
- /* Configure the PCIe MAC to run in RC mode. */
- struct pci_controller *controller = &pci_controllers[i];
-
- controller->index = i;
- controller->ops = &tile_cfg_ops;
-
- controller->io_space.start = PCIBIOS_MIN_IO +
- (i * IO_SPACE_SIZE);
- controller->io_space.end = controller->io_space.start +
- IO_SPACE_SIZE - 1;
- BUG_ON(controller->io_space.end > IO_SPACE_LIMIT);
- controller->io_space.flags = IORESOURCE_IO;
- snprintf(controller->io_space_name,
- sizeof(controller->io_space_name),
- "PCI I/O domain %d", i);
- controller->io_space.name = controller->io_space_name;
-
- /*
- * The PCI memory resource is located above the PA space.
- * For every host bridge, the BAR window or the MMIO aperture
- * is in range [3GB, 4GB - 1] of a 4GB space beyond the
- * PA space.
- */
- controller->mem_offset = TILE_PCI_MEM_START +
- (i * TILE_PCI_BAR_WINDOW_TOP);
- controller->mem_space.start = controller->mem_offset +
- TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE;
- controller->mem_space.end = controller->mem_offset +
- TILE_PCI_BAR_WINDOW_TOP - 1;
- controller->mem_space.flags = IORESOURCE_MEM;
- snprintf(controller->mem_space_name,
- sizeof(controller->mem_space_name),
- "PCI mem domain %d", i);
- controller->mem_space.name = controller->mem_space_name;
- }
-
- return num_rc_controllers;
-}
-
-/*
- * (pin - 1) converts from the PCI standard's [1:4] convention to
- * a normal [0:3] range.
- */
-static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
-{
- struct pci_controller *controller =
- (struct pci_controller *)dev->sysdata;
- return controller->irq_intx_table[pin - 1];
-}
-
-static void fixup_read_and_payload_sizes(struct pci_controller *controller)
-{
- gxio_trio_context_t *trio_context = controller->trio;
- struct pci_bus *root_bus = controller->root_bus;
- TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control;
- TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap;
- unsigned int reg_offset;
- struct pci_bus *child;
- int mac;
- int err;
-
- mac = controller->mac;
-
- /* Set our max read request size to be 4KB. */
- reg_offset =
- (TRIO_PCIE_RC_DEVICE_CONTROL <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
-		TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
- reg_offset);
- dev_control.max_read_req_sz = 5;
- __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
- dev_control.word);
-
- /*
- * Set the max payload size supported by this Gx PCIe MAC.
- * Though Gx PCIe supports Max Payload Size of up to 1024 bytes,
- * experiments have shown that setting MPS to 256 yields the
- * best performance.
- */
- reg_offset =
- (TRIO_PCIE_RC_DEVICE_CAP <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
-		TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
- reg_offset);
- rc_dev_cap.mps_sup = 1;
- __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
- rc_dev_cap.word);
-
- /* Configure PCI Express MPS setting. */
- list_for_each_entry(child, &root_bus->children, node)
- pcie_bus_configure_settings(child);
-
-	/*
-	 * Set the mac_config register in TRIO based on the MPS/MRS of
-	 * the link.
-	 */
- reg_offset =
- (TRIO_PCIE_RC_DEVICE_CONTROL <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
-		TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
- reg_offset);
-
- err = gxio_trio_set_mps_mrs(trio_context,
- dev_control.max_payload_size,
- dev_control.max_read_req_sz,
- mac);
- if (err < 0) {
- pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n",
- mac, controller->trio_index);
- }
-}
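For reference, PCIe encodes both sizes as 128 << code, so the max_read_req_sz of 5 written above yields the 4KB read request size, and capping mps_sup at 1 advertises the 256-byte Max Payload Size that the comment notes performs best.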
-
-static int setup_pcie_rc_delay(char *str)
-{
- unsigned long delay = 0;
- unsigned long trio_index;
- unsigned long mac;
-
- if (str == NULL || !isdigit(*str))
- return -EINVAL;
- trio_index = simple_strtoul(str, (char **)&str, 10);
- if (trio_index >= TILEGX_NUM_TRIO)
- return -EINVAL;
-
- if (*str != ',')
- return -EINVAL;
-
- str++;
- if (!isdigit(*str))
- return -EINVAL;
- mac = simple_strtoul(str, (char **)&str, 10);
- if (mac >= TILEGX_TRIO_PCIES)
- return -EINVAL;
-
- if (*str != '\0') {
- if (*str != ',')
- return -EINVAL;
-
- str++;
- if (!isdigit(*str))
- return -EINVAL;
- delay = simple_strtoul(str, (char **)&str, 10);
- }
-
- rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
- return 0;
-}
-early_param("pcie_rc_delay", setup_pcie_rc_delay);
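Usage examples for this boot argument (hypothetical values): "pcie_rc_delay=0,2,15" delays the RC probe of TRIO 0, port 2 by 15 seconds, while "pcie_rc_delay=1,0" omits the delay field and falls back to DEFAULT_RC_DELAY (10 seconds).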
-
-/* PCI initialization entry point, called by subsys_initcall. */
-int __init pcibios_init(void)
-{
- resource_size_t offset;
- LIST_HEAD(resources);
- int next_busno;
- struct pci_host_bridge *bridge;
- int i;
-
- tile_pci_init();
-
- if (num_rc_controllers == 0)
- return 0;
-
- /*
- * Delay a bit in case devices aren't ready. Some devices are
- * known to require at least 20ms here, but we use a more
- * conservative value.
- */
- msleep(250);
-
- /* Scan all of the recorded PCI controllers. */
- for (next_busno = 0, i = 0; i < num_rc_controllers; i++) {
- struct pci_controller *controller = &pci_controllers[i];
- gxio_trio_context_t *trio_context = controller->trio;
- TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
- TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
- struct pci_bus *bus;
- unsigned int reg_offset;
- unsigned int class_code_revision;
- int trio_index;
- int mac;
- int ret;
-
- if (trio_context->fd < 0)
- continue;
-
- trio_index = controller->trio_index;
- mac = controller->mac;
-
- /*
- * Check for PCIe link-up status to decide if we need
- * to force the link to come up.
- */
- reg_offset =
- (TRIO_PCIE_INTFC_PORT_STATUS <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
- TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- port_status.word =
- __gxio_mmio_read(trio_context->mmio_base_mac +
- reg_offset);
- if (!port_status.dl_up) {
- if (rc_delay[trio_index][mac]) {
- pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n",
- rc_delay[trio_index][mac], mac,
- trio_index);
- msleep(rc_delay[trio_index][mac] * 1000);
- }
- ret = gxio_trio_force_rc_link_up(trio_context, mac);
- if (ret < 0)
- pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n",
- mac, trio_index);
- }
-
- pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n",
- i, trio_index, controller->mac);
-
- /* Delay the bus probe if needed. */
- if (rc_delay[trio_index][mac]) {
-			pr_info("Delaying PCIe RC bus enumeration %d sec on MAC %d on TRIO %d\n",
- rc_delay[trio_index][mac], mac, trio_index);
- msleep(rc_delay[trio_index][mac] * 1000);
- } else {
- /*
- * Wait a bit here because some EP devices
- * take longer to come up.
- */
- msleep(1000);
- }
-
- /* Check for PCIe link-up status again. */
- port_status.word =
- __gxio_mmio_read(trio_context->mmio_base_mac +
- reg_offset);
- if (!port_status.dl_up) {
- if (pcie_ports[trio_index].ports[mac].removable) {
- pr_info("PCI: link is down, MAC %d on TRIO %d\n",
- mac, trio_index);
- pr_info("This is expected if no PCIe card is connected to this link\n");
- } else
- pr_err("PCI: link is down, MAC %d on TRIO %d\n",
- mac, trio_index);
- continue;
- }
-
- /*
- * Ensure that the link can come out of L1 power down state.
- * Strictly speaking, this is needed only in the case of
- * heavy RC-initiated DMAs.
- */
- reg_offset =
- (TRIO_PCIE_INTFC_TX_FIFO_CTL <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
-			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
- tx_fifo_ctl.word =
- __gxio_mmio_read(trio_context->mmio_base_mac +
- reg_offset);
- tx_fifo_ctl.min_p_credits = 0;
- __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset,
- tx_fifo_ctl.word);
-
-		/*
-		 * Change the device ID so that the Linux bus crawl doesn't
-		 * confuse the internal bridge with any Tilera endpoints.
-		 */
- reg_offset =
- (TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
-			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
- (TILERA_GX36_RC_DEV_ID <<
- TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
- TILERA_VENDOR_ID);
-
- /* Set the internal P2P bridge class code. */
- reg_offset =
- (TRIO_PCIE_RC_REVISION_ID <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
-			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- class_code_revision =
- __gxio_mmio_read32(trio_context->mmio_base_mac +
- reg_offset);
- class_code_revision = (class_code_revision & 0xff) |
- (PCI_CLASS_BRIDGE_PCI << 16);
-
- __gxio_mmio_write32(trio_context->mmio_base_mac +
- reg_offset, class_code_revision);
-
-#ifdef USE_SHARED_PCIE_CONFIG_REGION
-
- /* Map in the MMIO space for the PIO region. */
- offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
- (((unsigned long long)mac) <<
- TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
-
-#else
-
- /* Alloc a PIO region for PCI config access per MAC. */
- ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
- if (ret < 0) {
- pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n",
- mac, trio_index);
-
- continue;
- }
-
- trio_context->pio_cfg_index[mac] = ret;
-
- /* For PIO CFG, the bus_address_hi parameter is 0. */
- ret = gxio_trio_init_pio_region_aux(trio_context,
- trio_context->pio_cfg_index[mac],
- mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
- if (ret < 0) {
- pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n",
- mac, trio_index);
-
- continue;
- }
-
- offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) |
- (((unsigned long long)mac) <<
- TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
-
-#endif
-
-		/*
-		 * To save VMALLOC space, we take advantage of the fact that
-		 * bit 29 in the PIO CFG address format is reserved as 0.
-		 * With TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT being
-		 * 30, this cuts VMALLOC space usage from 1GB to 512MB per
-		 * MAC.
-		 */
- trio_context->mmio_base_pio_cfg[mac] =
- iorpc_ioremap(trio_context->fd, offset, (1UL <<
- (TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1)));
- if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
- pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
- mac, trio_index);
-
- continue;
- }
-
- /* Initialize the PCIe interrupts. */
- if (tile_init_irqs(controller)) {
- pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
- mac, trio_index);
-
- continue;
- }
-
- /*
- * The PCI memory resource is located above the PA space.
- * The memory range for the PCI root bus should not overlap
- * with the physical RAM.
- */
- pci_add_resource_offset(&resources, &controller->mem_space,
- controller->mem_offset);
- pci_add_resource(&resources, &controller->io_space);
- controller->first_busno = next_busno;
-
- bridge = pci_alloc_host_bridge(0);
- if (!bridge)
- break;
-
- list_splice_init(&resources, &bridge->windows);
- bridge->dev.parent = NULL;
- bridge->sysdata = controller;
- bridge->busnr = next_busno;
- bridge->ops = controller->ops;
- bridge->swizzle_irq = pci_common_swizzle;
- bridge->map_irq = tile_map_irq;
-
- pci_scan_root_bus_bridge(bridge);
- bus = bridge->bus;
- controller->root_bus = bus;
- next_busno = bus->busn_res.end + 1;
- }
-
- /*
- * This comes from the generic Linux PCI driver.
- *
- * It allocates all of the resources (I/O memory, etc)
- * associated with the devices read in above.
- */
- pci_assign_unassigned_resources();
-
- /* Record the I/O resources in the PCI controller structure. */
- for (i = 0; i < num_rc_controllers; i++) {
- struct pci_controller *controller = &pci_controllers[i];
- gxio_trio_context_t *trio_context = controller->trio;
- struct pci_bus *root_bus = pci_controllers[i].root_bus;
- int ret;
- int j;
-
- /*
- * Skip controllers that are not properly initialized or
- * have down links.
- */
- if (root_bus == NULL)
- continue;
-
- /* Configure the max_payload_size values for this domain. */
- fixup_read_and_payload_sizes(controller);
-
- /* Alloc a PIO region for PCI memory access for each RC port. */
- ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
- if (ret < 0) {
- pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n",
- controller->trio_index, controller->mac);
-
- continue;
- }
-
- controller->pio_mem_index = ret;
-
- /*
- * For PIO MEM, the bus_address_hi parameter is hard-coded 0
- * because we always assign 32-bit PCI bus BAR ranges.
- */
- ret = gxio_trio_init_pio_region_aux(trio_context,
- controller->pio_mem_index,
- controller->mac,
- 0,
- 0);
- if (ret < 0) {
- pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n",
- controller->trio_index, controller->mac);
-
- continue;
- }
-
-#ifdef CONFIG_TILE_PCI_IO
- /*
- * Alloc a PIO region for PCI I/O space access for each RC port.
- */
- ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
- if (ret < 0) {
- pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n",
- controller->trio_index, controller->mac);
-
- continue;
- }
-
- controller->pio_io_index = ret;
-
- /*
- * For PIO IO, the bus_address_hi parameter is hard-coded 0
- * because PCI I/O address space is 32-bit.
- */
- ret = gxio_trio_init_pio_region_aux(trio_context,
- controller->pio_io_index,
- controller->mac,
- 0,
- HV_TRIO_PIO_FLAG_IO_SPACE);
- if (ret < 0) {
- pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n",
- controller->trio_index, controller->mac);
-
- continue;
- }
-#endif
-
- /*
- * Configure a Mem-Map region for each memory controller so
- * that Linux can map all of its PA space to the PCI bus.
- * Use the IOMMU to handle hash-for-home memory.
- */
- for_each_online_node(j) {
- unsigned long start_pfn = node_start_pfn[j];
- unsigned long end_pfn = node_end_pfn[j];
- unsigned long nr_pages = end_pfn - start_pfn;
-
- ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
- 0);
- if (ret < 0) {
- pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n",
- controller->trio_index, controller->mac,
- j);
-
- goto alloc_mem_map_failed;
- }
-
- controller->mem_maps[j] = ret;
-
- /*
- * Initialize the Mem-Map and the I/O MMU so that all
- * the physical memory can be accessed by the endpoint
- * devices. The base bus address is set to the base CPA
- * of this memory controller plus an offset (see pci.h).
- * The region's base VA is set to the base CPA. The
- * I/O MMU table essentially translates the CPA to
- * the real PA. Implicitly, for node 0, we create
- * a separate Mem-Map region that serves as the inbound
- * window for legacy 32-bit devices. This is a direct
- * map of the low 4GB CPA space.
- */
- ret = gxio_trio_init_memory_map_mmu_aux(trio_context,
- controller->mem_maps[j],
- start_pfn << PAGE_SHIFT,
- nr_pages << PAGE_SHIFT,
- trio_context->asid,
- controller->mac,
- (start_pfn << PAGE_SHIFT) +
- TILE_PCI_MEM_MAP_BASE_OFFSET,
- j,
- GXIO_TRIO_ORDER_MODE_UNORDERED);
- if (ret < 0) {
- pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n",
- controller->trio_index, controller->mac,
- j);
-
- goto alloc_mem_map_failed;
- }
- continue;
-
-alloc_mem_map_failed:
- break;
- }
-
- pci_bus_add_devices(root_bus);
- }
-
- return 0;
-}
-subsys_initcall(pcibios_init);
-
-/* Process any "pci=" kernel boot arguments. */
-char *__init pcibios_setup(char *str)
-{
- if (!strcmp(str, "off")) {
- pci_probe = 0;
- return NULL;
- }
- return str;
-}
-
-/*
- * Called for each device after PCI setup is done.
- * We initialize the PCI device capabilities conservatively, assuming that
- * all devices can only address the 32-bit DMA space. The exception here
- * is that the device dma_offset is set to the value used for 64-bit
- * capable devices. This is OK because dma_offset is not used by the legacy
- * dma_ops, nor by the hybrid dma_ops' streaming DMAs, which are 64-bit ops.
- * This implementation matches the kernel design of setting PCI devices'
- * coherent_dma_mask to 0xffffffffull by default, allowing the device drivers
- * to skip calling pci_set_consistent_dma_mask(DMA_BIT_MASK(32)).
- */
-static void pcibios_fixup_final(struct pci_dev *pdev)
-{
- set_dma_ops(&pdev->dev, gx_legacy_pci_dma_map_ops);
- set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
- pdev->dev.archdata.max_direct_dma_addr =
- TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
- pdev->dev.coherent_dma_mask = TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
-
-/* Map a PCI MMIO bus address into VA space. */
-void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
-{
- struct pci_controller *controller = NULL;
- resource_size_t bar_start;
- resource_size_t bar_end;
- resource_size_t offset;
- resource_size_t start;
- resource_size_t end;
- int trio_fd;
- int i;
-
- start = phys_addr;
- end = phys_addr + size - 1;
-
- /*
- * By searching phys_addr in each controller's mem_space, we can
- * determine the controller that should accept the PCI memory access.
- */
- for (i = 0; i < num_rc_controllers; i++) {
- /*
- * Skip controllers that are not properly initialized or
- * have down links.
- */
- if (pci_controllers[i].root_bus == NULL)
- continue;
-
- bar_start = pci_controllers[i].mem_space.start;
- bar_end = pci_controllers[i].mem_space.end;
-
- if ((start >= bar_start) && (end <= bar_end)) {
- controller = &pci_controllers[i];
- break;
- }
- }
-
- if (controller == NULL)
- return NULL;
-
- trio_fd = controller->trio->fd;
-
- /* Convert the resource start to the bus address offset. */
- start = phys_addr - controller->mem_offset;
-
- offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;
-
- /* We need to keep the PCI bus address's in-page offset in the VA. */
- return iorpc_ioremap(trio_fd, offset, size) +
- (start & (PAGE_SIZE - 1));
-}
-EXPORT_SYMBOL(ioremap);
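A minimal sketch of how a hypothetical endpoint driver reaches this ioremap() via the standard PCI core helpers (illustrative only; pdev is the driver's struct pci_dev):

	void __iomem *regs;

	regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;
	/* MMIO accesses now go through the controller's PIO MEM region. */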
-
-#ifdef CONFIG_TILE_PCI_IO
-/* Map a PCI I/O address into VA space. */
-void __iomem *ioport_map(unsigned long port, unsigned int size)
-{
- struct pci_controller *controller = NULL;
- resource_size_t bar_start;
- resource_size_t bar_end;
- resource_size_t offset;
- resource_size_t start;
- resource_size_t end;
- int trio_fd;
- int i;
-
- start = port;
- end = port + size - 1;
-
- /*
- * By searching the port in each controller's io_space, we can
- * determine the controller that should accept the PCI I/O access.
- */
- for (i = 0; i < num_rc_controllers; i++) {
- /*
- * Skip controllers that are not properly initialized or
- * have down links.
- */
- if (pci_controllers[i].root_bus == NULL)
- continue;
-
- bar_start = pci_controllers[i].io_space.start;
- bar_end = pci_controllers[i].io_space.end;
-
- if ((start >= bar_start) && (end <= bar_end)) {
- controller = &pci_controllers[i];
- break;
- }
- }
-
- if (controller == NULL)
- return NULL;
-
- trio_fd = controller->trio->fd;
-
- /* Convert the resource start to the bus address offset. */
- port -= controller->io_space.start;
-
- offset = HV_TRIO_PIO_OFFSET(controller->pio_io_index) + port;
-
- /* We need to keep the PCI bus address's in-page offset in the VA. */
- return iorpc_ioremap(trio_fd, offset, size) + (port & (PAGE_SIZE - 1));
-}
-EXPORT_SYMBOL(ioport_map);
-
-void ioport_unmap(void __iomem *addr)
-{
- iounmap(addr);
-}
-EXPORT_SYMBOL(ioport_unmap);
-#endif
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
- iounmap(addr);
-}
-EXPORT_SYMBOL(pci_iounmap);
-
-/****************************************************************
- *
- * Tile PCI config space read/write routines
- *
- ****************************************************************/
-
-/*
- * These are the normal read and write ops; they are expanded with
- * macros from pci_bus_read_config_byte() etc.
- *
- * devfn is the combined PCI device & function.
- *
- * offset is in bytes, from the start of config space for the
- * specified bus & device.
- */
-static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
- int size, u32 *val)
-{
- struct pci_controller *controller = bus->sysdata;
- gxio_trio_context_t *trio_context = controller->trio;
- int busnum = bus->number & 0xff;
- int device = PCI_SLOT(devfn);
- int function = PCI_FUNC(devfn);
- int config_type = 1;
- TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
- void *mmio_addr;
-
- /*
- * Map all accesses to the local device on root bus into the
- * MMIO space of the MAC. Accesses to the downstream devices
- * go to the PIO space.
- */
- if (pci_is_root_bus(bus)) {
- if (device == 0) {
- /*
- * This is the internal downstream P2P bridge,
- * access directly.
- */
- unsigned int reg_offset;
-
- reg_offset = ((offset & 0xFFF) <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
-				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (controller->mac <<
- TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- mmio_addr = trio_context->mmio_base_mac + reg_offset;
-
- goto valid_device;
-
- } else {
- /*
- * We fake an empty device for (device > 0),
- * since there is only one device on bus 0.
- */
- goto invalid_device;
- }
- }
-
- /*
- * Accesses to the directly attached device have to be
- * sent as type-0 configs.
- */
- if (busnum == (controller->first_busno + 1)) {
- /*
- * There is only one device off of our built-in P2P bridge.
- */
- if (device != 0)
- goto invalid_device;
-
- config_type = 0;
- }
-
- cfg_addr.word = 0;
- cfg_addr.reg_addr = (offset & 0xFFF);
- cfg_addr.fn = function;
- cfg_addr.dev = device;
- cfg_addr.bus = busnum;
- cfg_addr.type = config_type;
-
- /*
- * Note that we don't set the mac field in cfg_addr because the
- * mapping is per port.
- */
- mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
- cfg_addr.word;
-
-valid_device:
-
- switch (size) {
- case 4:
- *val = __gxio_mmio_read32(mmio_addr);
- break;
-
- case 2:
- *val = __gxio_mmio_read16(mmio_addr);
- break;
-
- case 1:
- *val = __gxio_mmio_read8(mmio_addr);
- break;
-
- default:
- return PCIBIOS_FUNC_NOT_SUPPORTED;
- }
-
- TRACE_CFG_RD(size, *val, busnum, device, function, offset);
-
- return 0;
-
-invalid_device:
-
- switch (size) {
- case 4:
- *val = 0xFFFFFFFF;
- break;
-
- case 2:
- *val = 0xFFFF;
- break;
-
- case 1:
- *val = 0xFF;
- break;
-
- default:
- return PCIBIOS_FUNC_NOT_SUPPORTED;
- }
-
- return 0;
-}
-
-
-/*
- * See tile_cfg_read() for relevant comments.
- * Note that "val" is the value to write, not a pointer to that value.
- */
-static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
- int size, u32 val)
-{
- struct pci_controller *controller = bus->sysdata;
- gxio_trio_context_t *trio_context = controller->trio;
- int busnum = bus->number & 0xff;
- int device = PCI_SLOT(devfn);
- int function = PCI_FUNC(devfn);
- int config_type = 1;
- TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
- void *mmio_addr;
- u32 val_32 = (u32)val;
- u16 val_16 = (u16)val;
- u8 val_8 = (u8)val;
-
- /*
- * Map all accesses to the local device on root bus into the
- * MMIO space of the MAC. Accesses to the downstream devices
- * go to the PIO space.
- */
- if (pci_is_root_bus(bus)) {
- if (device == 0) {
- /*
- * This is the internal downstream P2P bridge,
- * access directly.
- */
- unsigned int reg_offset;
-
- reg_offset = ((offset & 0xFFF) <<
- TRIO_CFG_REGION_ADDR__REG_SHIFT) |
- (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
-				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
- (controller->mac <<
- TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
-
- mmio_addr = trio_context->mmio_base_mac + reg_offset;
-
- goto valid_device;
-
- } else {
- /*
- * We fake an empty device for (device > 0),
- * since there is only one device on bus 0.
- */
- goto invalid_device;
- }
- }
-
- /*
- * Accesses to the directly attached device have to be
- * sent as type-0 configs.
- */
- if (busnum == (controller->first_busno + 1)) {
- /*
- * There is only one device off of our built-in P2P bridge.
- */
- if (device != 0)
- goto invalid_device;
-
- config_type = 0;
- }
-
- cfg_addr.word = 0;
- cfg_addr.reg_addr = (offset & 0xFFF);
- cfg_addr.fn = function;
- cfg_addr.dev = device;
- cfg_addr.bus = busnum;
- cfg_addr.type = config_type;
-
- /*
- * Note that we don't set the mac field in cfg_addr because the
- * mapping is per port.
- */
- mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
- cfg_addr.word;
-
-valid_device:
-
- switch (size) {
- case 4:
- __gxio_mmio_write32(mmio_addr, val_32);
- TRACE_CFG_WR(size, val_32, busnum, device, function, offset);
- break;
-
- case 2:
- __gxio_mmio_write16(mmio_addr, val_16);
- TRACE_CFG_WR(size, val_16, busnum, device, function, offset);
- break;
-
- case 1:
- __gxio_mmio_write8(mmio_addr, val_8);
- TRACE_CFG_WR(size, val_8, busnum, device, function, offset);
- break;
-
- default:
- return PCIBIOS_FUNC_NOT_SUPPORTED;
- }
-
-invalid_device:
-
- return 0;
-}
-
-
-static struct pci_ops tile_cfg_ops = {
- .read = tile_cfg_read,
- .write = tile_cfg_write,
-};
-
-
-/* MSI support starts here. */
-static unsigned int tilegx_msi_startup(struct irq_data *d)
-{
- if (irq_data_get_msi_desc(d))
- pci_msi_unmask_irq(d);
-
- return 0;
-}
-
-static void tilegx_msi_ack(struct irq_data *d)
-{
- __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
-}
-
-static void tilegx_msi_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
-}
-
-static void tilegx_msi_unmask(struct irq_data *d)
-{
- __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
- pci_msi_unmask_irq(d);
-}
-
-static struct irq_chip tilegx_msi_chip = {
- .name = "tilegx_msi",
- .irq_startup = tilegx_msi_startup,
- .irq_ack = tilegx_msi_ack,
- .irq_mask = tilegx_msi_mask,
- .irq_unmask = tilegx_msi_unmask,
-
- /* TBD: support set_affinity. */
-};
-
-int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
-{
- struct pci_controller *controller;
- gxio_trio_context_t *trio_context;
- struct msi_msg msg;
- int default_irq;
- uint64_t mem_map_base;
- uint64_t mem_map_limit;
- u64 msi_addr;
- int mem_map;
- int cpu;
- int irq;
- int ret;
-
- irq = irq_alloc_hwirq(-1);
- if (!irq)
- return -ENOSPC;
-
- /*
- * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
- * devices that are not capable of generating a 64-bit message address.
- * These devices will fall back to using the legacy interrupts.
- * Most PCIe endpoint devices do support 64-bit message addressing.
- */
- if (desc->msi_attrib.is_64 == 0) {
- dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n");
-
- ret = -ENOMEM;
- goto is_64_failure;
- }
-
- default_irq = desc->msi_attrib.default_irq;
- controller = irq_get_handler_data(default_irq);
-
- BUG_ON(!controller);
-
- trio_context = controller->trio;
-
- /*
- * Allocate a scatter-queue that will accept the MSI write and
- * trigger the TILE-side interrupts. We use the scatter-queue regions
- * before the mem map regions, because the latter are needed by more
- * applications.
- */
- mem_map = gxio_trio_alloc_scatter_queues(trio_context, 1, 0, 0);
- if (mem_map >= 0) {
- TRIO_MAP_SQ_DOORBELL_FMT_t doorbell_template = {{
- .pop = 0,
- .doorbell = 1,
- }};
-
- mem_map += TRIO_NUM_MAP_MEM_REGIONS;
- mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
- mem_map * MEM_MAP_INTR_REGION_SIZE;
- mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
-
- msi_addr = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 8;
- msg.data = (unsigned int)doorbell_template.word;
- } else {
- /* SQ regions are out, allocate from map mem regions. */
- mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
- if (mem_map < 0) {
- dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n",
- desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
- ret = -ENOMEM;
- goto msi_mem_map_alloc_failure;
- }
-
- mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
- mem_map * MEM_MAP_INTR_REGION_SIZE;
- mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
-
- msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 -
- TRIO_MAP_MEM_REG_INT0;
-
- msg.data = mem_map;
- }
-
- /* We try to distribute different IRQs to different tiles. */
- cpu = tile_irq_cpu(irq);
-
- /*
- * Now call up to the HV to configure the MSI interrupt and
- * set up the IPI binding.
- */
- ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
- KERNEL_PL, irq, controller->mac,
- mem_map, mem_map_base, mem_map_limit,
- trio_context->asid);
- if (ret < 0) {
- dev_info(&pdev->dev, "HV MSI config failed\n");
-
- goto hv_msi_config_failure;
- }
-
- irq_set_msi_desc(irq, desc);
-
- msg.address_hi = msi_addr >> 32;
- msg.address_lo = msi_addr & 0xffffffff;
-
- pci_write_msi_msg(irq, &msg);
- irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
- irq_set_handler_data(irq, controller);
-
- return 0;
-
-hv_msi_config_failure:
- /* Free mem-map */
-msi_mem_map_alloc_failure:
-is_64_failure:
- irq_free_hwirq(irq);
- return ret;
-}
-
-void arch_teardown_msi_irq(unsigned int irq)
-{
- irq_free_hwirq(irq);
-}
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
deleted file mode 100644
index 6394c1ccb68e..000000000000
--- a/arch/tile/kernel/perf_event.c
+++ /dev/null
@@ -1,1005 +0,0 @@
-/*
- * Copyright 2014 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- *
- * Perf_events support for Tile processor.
- *
- * This code is based upon the x86 perf event
- * code, which is:
- *
- * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2009 Jaswinder Singh Rajput
- * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
- * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
- * Copyright (C) 2009 Google, Inc., Stephane Eranian
- */
-
-#include <linux/kprobes.h>
-#include <linux/kernel.h>
-#include <linux/kdebug.h>
-#include <linux/mutex.h>
-#include <linux/bitmap.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/perf_event.h>
-#include <linux/atomic.h>
-#include <asm/traps.h>
-#include <asm/stack.h>
-#include <asm/pmc.h>
-#include <hv/hypervisor.h>
-
-#define TILE_MAX_COUNTERS 4
-
-#define PERF_COUNT_0_IDX 0
-#define PERF_COUNT_1_IDX 1
-#define AUX_PERF_COUNT_0_IDX 2
-#define AUX_PERF_COUNT_1_IDX 3
-
-struct cpu_hw_events {
- int n_events;
- struct perf_event *events[TILE_MAX_COUNTERS]; /* counter order */
-	struct perf_event	*event_list[TILE_MAX_COUNTERS]; /* enabled order */
- int assign[TILE_MAX_COUNTERS];
- unsigned long active_mask[BITS_TO_LONGS(TILE_MAX_COUNTERS)];
- unsigned long used_mask;
-};
-
-/* TILE arch specific performance monitor unit */
-struct tile_pmu {
- const char *name;
- int version;
- const int *hw_events; /* generic hw events table */
- /* generic hw cache events table */
- const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX];
-	int		(*map_hw_event)(u64);	  /* method used to map
-						     hw events */
-	int		(*map_cache_event)(u64);  /* method used to map
-						     cache events */
-
- u64 max_period; /* max sampling period */
- u64 cntval_mask; /* counter width mask */
- int cntval_bits; /* counter width */
-	int		max_events;	/* max generic hw events
-					   in map */
-	int		num_counters;	/* number of base + aux counters */
-	int		num_base_counters; /* number of base counters */
-};
-
-DEFINE_PER_CPU(u64, perf_irqs);
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
-
-#define TILE_OP_UNSUPP (-1)
-
-#ifndef __tilegx__
-/* TILEPro hardware events map */
-static const int tile_hw_event_map[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = 0x01, /* ONE */
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x06, /* MP_BUNDLE_RETIRED */
- [PERF_COUNT_HW_CACHE_REFERENCES] = TILE_OP_UNSUPP,
- [PERF_COUNT_HW_CACHE_MISSES] = TILE_OP_UNSUPP,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x16, /* MP_CONDITIONAL_BRANCH_ISSUED */
-	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x14, /* MP_CONDITIONAL_BRANCH_MISSPREDICT */
- [PERF_COUNT_HW_BUS_CYCLES] = TILE_OP_UNSUPP,
-};
-#else
-/* TILEGx hardware events map */
-static const int tile_hw_event_map[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = 0x181, /* ONE */
- [PERF_COUNT_HW_INSTRUCTIONS] = 0xdb, /* INSTRUCTION_BUNDLE */
- [PERF_COUNT_HW_CACHE_REFERENCES] = TILE_OP_UNSUPP,
- [PERF_COUNT_HW_CACHE_MISSES] = TILE_OP_UNSUPP,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0xd9, /* COND_BRANCH_PRED_CORRECT */
-	[PERF_COUNT_HW_BRANCH_MISSES]		= 0xda, /* COND_BRANCH_PRED_INCORRECT */
- [PERF_COUNT_HW_BUS_CYCLES] = TILE_OP_UNSUPP,
-};
-#endif
-
-#define C(x) PERF_COUNT_HW_CACHE_##x
-
-/*
- * Generalized hardware cache-related hw_event table, filled in on a
- * per-model basis. A value of -1 means 'not supported'; any other
- * value means the raw hw_event ID.
- */
-#ifndef __tilegx__
-/* TILEPro hardware cache event map */
-static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-[C(L1D)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0x21, /* RD_MISS */
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0x22, /* WR_MISS */
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(L1I)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = 0x12, /* MP_ICACHE_HIT_ISSUED */
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = 0x1d, /* TLB_CNT */
- [C(RESULT_MISS)] = 0x20, /* TLB_EXCEPTION */
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = 0x13, /* MP_ITLB_HIT_ISSUED */
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-};
-#else
-/* TILEGx hardware cache event map */
-static const int tile_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-[C(L1D)] = {
- /*
- * Like some other architectures (e.g. ARM), the performance
- * counters don't differentiate between read and write
- * accesses/misses, so this isn't strictly correct, but it's the
- * best we can do. Writes and reads get combined.
- */
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0x44, /* RD_MISS */
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0x45, /* WR_MISS */
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(L1I)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */
- [C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = 0x40, /* TLB_CNT */
- [C(RESULT_MISS)] = 0x43, /* TLB_EXCEPTION */
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = 0xd4, /* ITLB_MISS_INT */
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-[C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = TILE_OP_UNSUPP,
- [C(RESULT_MISS)] = TILE_OP_UNSUPP,
- },
-},
-};
-#endif
-
-static atomic_t tile_active_events;
-static DEFINE_MUTEX(perf_intr_reserve_mutex);
-
-static int tile_map_hw_event(u64 config);
-static int tile_map_cache_event(u64 config);
-
-static int tile_pmu_handle_irq(struct pt_regs *regs, int fault);
-
-/*
- * To avoid new_raw_count getting larger than prev_raw_count
- * in tile_perf_event_update(), we limit the value of max_period to 2^31 - 1.
- */
-static const struct tile_pmu tilepmu = {
-#ifndef __tilegx__
- .name = "tilepro",
-#else
- .name = "tilegx",
-#endif
- .max_events = ARRAY_SIZE(tile_hw_event_map),
- .map_hw_event = tile_map_hw_event,
- .hw_events = tile_hw_event_map,
- .map_cache_event = tile_map_cache_event,
- .cache_events = &tile_cache_event_map,
- .cntval_bits = 32,
- .cntval_mask = (1ULL << 32) - 1,
- .max_period = (1ULL << 31) - 1,
- .num_counters = TILE_MAX_COUNTERS,
- .num_base_counters = TILE_BASE_COUNTERS,
-};
-
-static const struct tile_pmu *tile_pmu __read_mostly;
-
-/*
- * Check whether any perf events are enabled.
- */
-int tile_perf_enabled(void)
-{
- return atomic_read(&tile_active_events) != 0;
-}
-
-/*
- * Read Performance Counters.
- */
-static inline u64 read_counter(int idx)
-{
- u64 val = 0;
-
- /* __insn_mfspr() only takes an immediate argument */
- switch (idx) {
- case PERF_COUNT_0_IDX:
- val = __insn_mfspr(SPR_PERF_COUNT_0);
- break;
- case PERF_COUNT_1_IDX:
- val = __insn_mfspr(SPR_PERF_COUNT_1);
- break;
- case AUX_PERF_COUNT_0_IDX:
- val = __insn_mfspr(SPR_AUX_PERF_COUNT_0);
- break;
- case AUX_PERF_COUNT_1_IDX:
- val = __insn_mfspr(SPR_AUX_PERF_COUNT_1);
- break;
- default:
- WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX ||
- idx < PERF_COUNT_0_IDX);
- }
-
- return val;
-}
-
-/*
- * Write Performance Counters.
- */
-static inline void write_counter(int idx, u64 value)
-{
- /* __insn_mtspr() only takes an immediate argument */
- switch (idx) {
- case PERF_COUNT_0_IDX:
- __insn_mtspr(SPR_PERF_COUNT_0, value);
- break;
- case PERF_COUNT_1_IDX:
- __insn_mtspr(SPR_PERF_COUNT_1, value);
- break;
- case AUX_PERF_COUNT_0_IDX:
- __insn_mtspr(SPR_AUX_PERF_COUNT_0, value);
- break;
- case AUX_PERF_COUNT_1_IDX:
- __insn_mtspr(SPR_AUX_PERF_COUNT_1, value);
- break;
- default:
- WARN_ON_ONCE(idx > AUX_PERF_COUNT_1_IDX ||
- idx < PERF_COUNT_0_IDX);
- }
-}
-
-/*
- * Enable performance event by setting
- * Performance Counter Control registers.
- */
-static inline void tile_pmu_enable_event(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- unsigned long cfg, mask;
- int shift, idx = hwc->idx;
-
- /*
- * prevent early activation from tile_pmu_start() in hw_perf_enable
- */
-
- if (WARN_ON_ONCE(idx == -1))
- return;
-
- if (idx < tile_pmu->num_base_counters)
- cfg = __insn_mfspr(SPR_PERF_COUNT_CTL);
- else
- cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL);
-
- switch (idx) {
- case PERF_COUNT_0_IDX:
- case AUX_PERF_COUNT_0_IDX:
- mask = TILE_EVENT_MASK;
- shift = 0;
- break;
- case PERF_COUNT_1_IDX:
- case AUX_PERF_COUNT_1_IDX:
- mask = TILE_EVENT_MASK << 16;
- shift = 16;
- break;
- default:
- WARN_ON_ONCE(idx < PERF_COUNT_0_IDX ||
- idx > AUX_PERF_COUNT_1_IDX);
- return;
- }
-
- /* Clear mask bits to enable the event. */
- cfg &= ~mask;
- cfg |= hwc->config << shift;
-
- if (idx < tile_pmu->num_base_counters)
- __insn_mtspr(SPR_PERF_COUNT_CTL, cfg);
- else
- __insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg);
-}
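
The switch above reflects the control-register layout: each PERF_COUNT_CTL word carries two per-counter event fields, counter 0 in the low half and counter 1 shifted up by 16. A standalone userspace sketch of that read-modify-write follows; the 16-bit field width is an assumption here (the real width is whatever TILE_EVENT_MASK defines):

#include <stdio.h>

#define EVENT_MASK 0xffffUL			/* assumed TILE_EVENT_MASK */

/* Install an event code in the half of cfg selected by `which'. */
static unsigned long pack_event(unsigned long cfg, int which,
				unsigned long event)
{
	int shift = which ? 16 : 0;

	cfg &= ~(EVENT_MASK << shift);		/* clear the old field */
	return cfg | (event << shift);		/* install the new event */
}

int main(void)
{
	unsigned long cfg = 0;

	cfg = pack_event(cfg, 0, 0xdb);		/* INSTRUCTION_BUNDLE */
	cfg = pack_event(cfg, 1, 0x181);	/* ONE */
	printf("%#lx\n", cfg);			/* prints 0x18100db */
	return 0;
}
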
-
-/*
- * Disable performance event by clearing
- * Performance Counter Control registers.
- */
-static inline void tile_pmu_disable_event(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- unsigned long cfg, mask;
- int idx = hwc->idx;
-
- if (idx == -1)
- return;
-
- if (idx < tile_pmu->num_base_counters)
- cfg = __insn_mfspr(SPR_PERF_COUNT_CTL);
- else
- cfg = __insn_mfspr(SPR_AUX_PERF_COUNT_CTL);
-
- switch (idx) {
- case PERF_COUNT_0_IDX:
- case AUX_PERF_COUNT_0_IDX:
- mask = TILE_PLM_MASK;
- break;
- case PERF_COUNT_1_IDX:
- case AUX_PERF_COUNT_1_IDX:
- mask = TILE_PLM_MASK << 16;
- break;
- default:
- WARN_ON_ONCE(idx < PERF_COUNT_0_IDX ||
- idx > AUX_PERF_COUNT_1_IDX);
- return;
- }
-
- /* Set mask bits to disable the event. */
- cfg |= mask;
-
- if (idx < tile_pmu->num_base_counters)
- __insn_mtspr(SPR_PERF_COUNT_CTL, cfg);
- else
- __insn_mtspr(SPR_AUX_PERF_COUNT_CTL, cfg);
-}
-
-/*
- * Propagate event elapsed time into the generic event.
- * Can only be executed on the CPU where the event is active.
- * Returns the delta events processed.
- */
-static u64 tile_perf_event_update(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int shift = 64 - tile_pmu->cntval_bits;
- u64 prev_raw_count, new_raw_count;
- u64 oldval;
- int idx = hwc->idx;
- u64 delta;
-
- /*
- * Careful: an NMI might modify the previous event value.
- *
- * Our tactic to handle this is to first atomically read and
- * exchange a new raw count - then add that new-prev delta
- * count to the generic event atomically:
- */
-again:
- prev_raw_count = local64_read(&hwc->prev_count);
- new_raw_count = read_counter(idx);
-
- oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
- new_raw_count);
- if (oldval != prev_raw_count)
- goto again;
-
- /*
- * Now we have the new raw value and have updated the prev
- * timestamp already. We can now calculate the elapsed delta
- * (event-)time and add that to the generic event.
- *
- * Careful, not all hw sign-extends above the physical width
- * of the count.
- */
- delta = (new_raw_count << shift) - (prev_raw_count << shift);
- delta >>= shift;
-
- local64_add(delta, &event->count);
- local64_sub(delta, &hwc->period_left);
-
- return new_raw_count;
-}
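
The shift arithmetic above is the standard trick for counters narrower than 64 bits: shifting both samples up by (64 - cntval_bits) before subtracting makes the delta come out right even if the 32-bit hardware counter wrapped between reads. A minimal standalone sketch of the same computation:

#include <stdint.h>
#include <stdio.h>

/* Delta of two 32-bit counter samples, tolerating one wraparound. */
static uint64_t counter_delta(uint64_t prev, uint64_t cur)
{
	const int shift = 64 - 32;		/* 64 - cntval_bits */

	return ((cur << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	/* The counter wrapped from 0xfffffff0 to 0x10 between reads. */
	printf("%llu\n", (unsigned long long)
	       counter_delta(0xfffffff0ULL, 0x10ULL));	/* prints 32 */
	return 0;
}
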
-
-/*
- * Set the next IRQ period, based on the hwc->period_left value.
- * To be called with the event disabled in hw:
- */
-static int tile_event_set_period(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- s64 left = local64_read(&hwc->period_left);
- s64 period = hwc->sample_period;
- int ret = 0;
-
- /*
- * If we are way outside a reasonable range then just skip forward:
- */
- if (unlikely(left <= -period)) {
- left = period;
- local64_set(&hwc->period_left, left);
- hwc->last_period = period;
- ret = 1;
- }
-
- if (unlikely(left <= 0)) {
- left += period;
- local64_set(&hwc->period_left, left);
- hwc->last_period = period;
- ret = 1;
- }
- if (left > tile_pmu->max_period)
- left = tile_pmu->max_period;
-
- /*
- * The hw event starts counting from this event offset,
-	 * mark it to be able to extract future deltas:
- */
- local64_set(&hwc->prev_count, (u64)-left);
-
- write_counter(idx, (u64)(-left) & tile_pmu->cntval_mask);
-
- perf_event_update_userpage(event);
-
- return ret;
-}
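
Writing (u64)-left into the counter arms it to overflow, and thus raise the PMC interrupt, after exactly `left` more events. A small demonstration of that two's-complement arming, again assuming the 32-bit counter width:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int64_t left = 1000;
	uint32_t counter = (uint32_t)-left;	/* as in write_counter() */
	uint64_t events = 0;

	while (counter != 0) {			/* wrap to 0 models the IRQ */
		counter++;
		events++;
	}
	assert(events == 1000);			/* fires after `left' events */
	return 0;
}
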
-
-/*
- * Stop the event but do not release the PMU counter
- */
-static void tile_pmu_stop(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
-
- if (__test_and_clear_bit(idx, cpuc->active_mask)) {
- tile_pmu_disable_event(event);
- cpuc->events[hwc->idx] = NULL;
- WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
- hwc->state |= PERF_HES_STOPPED;
- }
-
- if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
- /*
-		 * Drain the remaining delta count out of an event
- * that we are disabling:
- */
- tile_perf_event_update(event);
- hwc->state |= PERF_HES_UPTODATE;
- }
-}
-
-/*
- * Start an event (without re-assigning counter)
- */
-static void tile_pmu_start(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- int idx = event->hw.idx;
-
- if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
- return;
-
- if (WARN_ON_ONCE(idx == -1))
- return;
-
- if (flags & PERF_EF_RELOAD) {
- WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
- tile_event_set_period(event);
- }
-
- event->hw.state = 0;
-
- cpuc->events[idx] = event;
- __set_bit(idx, cpuc->active_mask);
-
- unmask_pmc_interrupts();
-
- tile_pmu_enable_event(event);
-
- perf_event_update_userpage(event);
-}
-
-/*
- * Add a single event to the PMU.
- *
- * The event is added to the group of enabled events
- * but only if it can be scheduled with existing events.
- */
-static int tile_pmu_add(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct hw_perf_event *hwc;
- unsigned long mask;
- int b, max_cnt;
-
- hwc = &event->hw;
-
- /*
- * We are full.
- */
- if (cpuc->n_events == tile_pmu->num_counters)
- return -ENOSPC;
-
- cpuc->event_list[cpuc->n_events] = event;
- cpuc->n_events++;
-
- hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
- if (!(flags & PERF_EF_START))
- hwc->state |= PERF_HES_ARCH;
-
- /*
- * Find first empty counter.
- */
- max_cnt = tile_pmu->num_counters;
- mask = ~cpuc->used_mask;
-
- /* Find next free counter. */
- b = find_next_bit(&mask, max_cnt, 0);
-
- /* Should not happen. */
- if (WARN_ON_ONCE(b == max_cnt))
- return -ENOSPC;
-
- /*
- * Assign counter to event.
- */
- event->hw.idx = b;
- __set_bit(b, &cpuc->used_mask);
-
- /*
- * Start if requested.
- */
- if (flags & PERF_EF_START)
- tile_pmu_start(event, PERF_EF_RELOAD);
-
- return 0;
-}
-
-/*
- * Delete a single event from the PMU.
- *
- * The event is deleted from the group of enabled events.
- * If it is the last event, disable PMU interrupt.
- */
-static void tile_pmu_del(struct perf_event *event, int flags)
-{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- int i;
-
- /*
- * Remove event from list, compact list if necessary.
- */
- for (i = 0; i < cpuc->n_events; i++) {
- if (cpuc->event_list[i] == event) {
- while (++i < cpuc->n_events)
- cpuc->event_list[i-1] = cpuc->event_list[i];
- --cpuc->n_events;
- cpuc->events[event->hw.idx] = NULL;
- __clear_bit(event->hw.idx, &cpuc->used_mask);
- tile_pmu_stop(event, PERF_EF_UPDATE);
- break;
- }
- }
- /*
- * If there are no events left, then mask PMU interrupt.
- */
- if (cpuc->n_events == 0)
- mask_pmc_interrupts();
- perf_event_update_userpage(event);
-}
-
-/*
- * Propagate event elapsed time into the event.
- */
-static inline void tile_pmu_read(struct perf_event *event)
-{
- tile_perf_event_update(event);
-}
-
-/*
- * Map generic events to Tile PMU.
- */
-static int tile_map_hw_event(u64 config)
-{
- if (config >= tile_pmu->max_events)
- return -EINVAL;
- return tile_pmu->hw_events[config];
-}
-
-/*
- * Map generic hardware cache events to Tile PMU.
- */
-static int tile_map_cache_event(u64 config)
-{
- unsigned int cache_type, cache_op, cache_result;
- int code;
-
- if (!tile_pmu->cache_events)
- return -ENOENT;
-
- cache_type = (config >> 0) & 0xff;
- if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
- return -EINVAL;
-
- cache_op = (config >> 8) & 0xff;
- if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
- return -EINVAL;
-
- cache_result = (config >> 16) & 0xff;
- if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
- return -EINVAL;
-
- code = (*tile_pmu->cache_events)[cache_type][cache_op][cache_result];
- if (code == TILE_OP_UNSUPP)
- return -EINVAL;
-
- return code;
-}
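
This decode is the inverse of the standard perf_event ABI packing for cache events: the cache type occupies bits 0-7 of attr.config, the operation bits 8-15, and the result bits 16-23. A sketch of the corresponding userspace encoding:

#include <linux/perf_event.h>
#include <stdio.h>

/* Pack a cache type/op/result triple the way this file unpacks it. */
static unsigned long long cache_config(int type, int op, int result)
{
	return type | (op << 8) | (result << 16);
}

int main(void)
{
	/* L1D read misses: maps to the RD_MISS codes in the tables above. */
	printf("%#llx\n",
	       cache_config(PERF_COUNT_HW_CACHE_L1D,
			    PERF_COUNT_HW_CACHE_OP_READ,
			    PERF_COUNT_HW_CACHE_RESULT_MISS));
	return 0;
}
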
-
-static void tile_event_destroy(struct perf_event *event)
-{
- if (atomic_dec_return(&tile_active_events) == 0)
- release_pmc_hardware();
-}
-
-static int __tile_event_init(struct perf_event *event)
-{
- struct perf_event_attr *attr = &event->attr;
- struct hw_perf_event *hwc = &event->hw;
- int code;
-
- switch (attr->type) {
- case PERF_TYPE_HARDWARE:
- code = tile_pmu->map_hw_event(attr->config);
- break;
- case PERF_TYPE_HW_CACHE:
- code = tile_pmu->map_cache_event(attr->config);
- break;
- case PERF_TYPE_RAW:
- code = attr->config & TILE_EVENT_MASK;
- break;
- default:
- /* Should not happen. */
- return -EOPNOTSUPP;
- }
-
- if (code < 0)
- return code;
-
- hwc->config = code;
- hwc->idx = -1;
-
- if (attr->exclude_user)
- hwc->config |= TILE_CTL_EXCL_USER;
-
- if (attr->exclude_kernel)
- hwc->config |= TILE_CTL_EXCL_KERNEL;
-
- if (attr->exclude_hv)
- hwc->config |= TILE_CTL_EXCL_HV;
-
- if (!hwc->sample_period) {
- hwc->sample_period = tile_pmu->max_period;
- hwc->last_period = hwc->sample_period;
- local64_set(&hwc->period_left, hwc->sample_period);
- }
- event->destroy = tile_event_destroy;
- return 0;
-}
-
-static int tile_event_init(struct perf_event *event)
-{
- int err = 0;
- perf_irq_t old_irq_handler = NULL;
-
- if (atomic_inc_return(&tile_active_events) == 1)
- old_irq_handler = reserve_pmc_hardware(tile_pmu_handle_irq);
-
- if (old_irq_handler) {
- pr_warn("PMC hardware busy (reserved by oprofile)\n");
-
- atomic_dec(&tile_active_events);
- return -EBUSY;
- }
-
- switch (event->attr.type) {
- case PERF_TYPE_RAW:
- case PERF_TYPE_HARDWARE:
- case PERF_TYPE_HW_CACHE:
- break;
-
- default:
- return -ENOENT;
- }
-
- err = __tile_event_init(event);
- if (err) {
- if (event->destroy)
- event->destroy(event);
- }
- return err;
-}
-
-static struct pmu tilera_pmu = {
- .event_init = tile_event_init,
- .add = tile_pmu_add,
- .del = tile_pmu_del,
-
- .start = tile_pmu_start,
- .stop = tile_pmu_stop,
-
- .read = tile_pmu_read,
-};
-
-/*
- * PMU IRQ handler; the PMU has two interrupts, which share this handler.
- */
-int tile_pmu_handle_irq(struct pt_regs *regs, int fault)
-{
- struct perf_sample_data data;
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct perf_event *event;
- struct hw_perf_event *hwc;
- u64 val;
- unsigned long status;
- int bit;
-
- __this_cpu_inc(perf_irqs);
-
- if (!atomic_read(&tile_active_events))
- return 0;
-
- status = pmc_get_overflow();
- pmc_ack_overflow(status);
-
- for_each_set_bit(bit, &status, tile_pmu->num_counters) {
-
- event = cpuc->events[bit];
-
- if (!event)
- continue;
-
- if (!test_bit(bit, cpuc->active_mask))
- continue;
-
- hwc = &event->hw;
-
- val = tile_perf_event_update(event);
- if (val & (1ULL << (tile_pmu->cntval_bits - 1)))
- continue;
-
- perf_sample_data_init(&data, 0, event->hw.last_period);
- if (!tile_event_set_period(event))
- continue;
-
- if (perf_event_overflow(event, &data, regs))
- tile_pmu_stop(event, 0);
- }
-
- return 0;
-}
-
-static bool __init supported_pmu(void)
-{
- tile_pmu = &tilepmu;
- return true;
-}
-
-int __init init_hw_perf_events(void)
-{
- supported_pmu();
- perf_pmu_register(&tilera_pmu, "cpu", PERF_TYPE_RAW);
- return 0;
-}
-arch_initcall(init_hw_perf_events);
-
-/* Callchain handling code. */
-
-/*
- * Tile-specific backtracing code for perf_events.
- */
-static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
- struct pt_regs *regs)
-{
- struct KBacktraceIterator kbt;
- unsigned int i;
-
- /*
- * Get the address just after the "jalr" instruction that
- * jumps to the handler for a syscall. When we find this
- * address in a backtrace, we silently ignore it, which gives
- * us a one-step backtrace connection from the sys_xxx()
- * function in the kernel to the xxx() function in libc.
- * Otherwise, we lose the ability to properly attribute time
- * from the libc calls to the kernel implementations, since
- * oprofile only considers PCs from backtraces a pair at a time.
- */
- unsigned long handle_syscall_pc = handle_syscall_link_address();
-
- KBacktraceIterator_init(&kbt, NULL, regs);
- kbt.profile = 1;
-
- /*
- * The sample for the pc is already recorded. Now we are adding the
- * address of the callsites on the stack. Our iterator starts
- * with the frame of the (already sampled) call site. If our
- * iterator contained a "return address" field, we could have just
- * used it and wouldn't have needed to skip the first
- * frame. That's in effect what the arm and x86 versions do.
- * Instead we peel off the first iteration to get the equivalent
- * behavior.
- */
-
- if (KBacktraceIterator_end(&kbt))
- return;
- KBacktraceIterator_next(&kbt);
-
- /*
-	 * Limit the stack depth to 16 frames each for user and kernel
-	 * space, i.e. 32 stack frames in total.
- */
- for (i = 0; i < 16; ++i) {
- unsigned long pc;
- if (KBacktraceIterator_end(&kbt))
- break;
- pc = kbt.it.pc;
- if (pc != handle_syscall_pc)
- perf_callchain_store(entry, pc);
- KBacktraceIterator_next(&kbt);
- }
-}
-
-void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
- struct pt_regs *regs)
-{
- perf_callchain(entry, regs);
-}
-
-void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
- struct pt_regs *regs)
-{
- perf_callchain(entry, regs);
-}
diff --git a/arch/tile/kernel/pmc.c b/arch/tile/kernel/pmc.c
deleted file mode 100644
index 81cf8743a3f3..000000000000
--- a/arch/tile/kernel/pmc.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright 2014 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/atomic.h>
-
-#include <asm/processor.h>
-#include <asm/pmc.h>
-
-perf_irq_t perf_irq = NULL;
-int handle_perf_interrupt(struct pt_regs *regs, int fault)
-{
- int retval;
-
- if (!perf_irq)
- panic("Unexpected PERF_COUNT interrupt %d\n", fault);
-
- retval = perf_irq(regs, fault);
- return retval;
-}
-
-/* Reserve PMC hardware if it is available. */
-perf_irq_t reserve_pmc_hardware(perf_irq_t new_perf_irq)
-{
- return cmpxchg(&perf_irq, NULL, new_perf_irq);
-}
-EXPORT_SYMBOL(reserve_pmc_hardware);
-
-/* Release PMC hardware. */
-void release_pmc_hardware(void)
-{
- perf_irq = NULL;
-}
-EXPORT_SYMBOL(release_pmc_hardware);
-
-
-/*
- * Get the current overflow status of each base performance counter
- * and each auxiliary performance counter.
- */
-unsigned long
-pmc_get_overflow(void)
-{
- unsigned long status;
-
- /*
- * merge base+aux into a single vector
- */
- status = __insn_mfspr(SPR_PERF_COUNT_STS);
- status |= __insn_mfspr(SPR_AUX_PERF_COUNT_STS) << TILE_BASE_COUNTERS;
- return status;
-}
-
-/*
- * Clear the overflow status bit for each counter; a counter's
- * status bit is cleared by writing a one to it.
- */
-void
-pmc_ack_overflow(unsigned long status)
-{
- /*
- * clear overflow status by writing ones
- */
- __insn_mtspr(SPR_PERF_COUNT_STS, status);
- __insn_mtspr(SPR_AUX_PERF_COUNT_STS, status >> TILE_BASE_COUNTERS);
-}
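
These two routines translate between the separate base and aux status SPRs and one flat overflow bit vector, with the aux bits shifted up by TILE_BASE_COUNTERS. A sketch of the round trip, assuming TILE_BASE_COUNTERS is 2 (consistent with the four-counter index layout in perf_event.c above):

#include <assert.h>

#define BASE_COUNTERS 2		/* assumed value of TILE_BASE_COUNTERS */

int main(void)
{
	unsigned long base_sts = 0x1;	/* base counter 0 overflowed */
	unsigned long aux_sts = 0x2;	/* aux counter 1 overflowed */
	unsigned long merged = base_sts | (aux_sts << BASE_COUNTERS);

	assert(merged == 0x9);		/* bits 0 and 3 set */
	/* Splitting it back apart, as pmc_ack_overflow() does: */
	assert((merged & ((1UL << BASE_COUNTERS) - 1)) == base_sts);
	assert((merged >> BASE_COUNTERS) == aux_sts);
	return 0;
}
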
-
-/*
- * The perf count interrupts are masked and unmasked explicitly,
- * and only here. The normal irq_enable() does not enable them,
- * and irq_disable() does not disable them. That lets these
- * routines drive the perf count interrupts orthogonally.
- *
- * We also mask the perf count interrupts on entry to the perf count
- * interrupt handler in assembly code, and by default unmask them
- * again (with interrupt critical section protection) just before
- * returning from the interrupt. If the perf count handler returns
- * a non-zero error code, then we don't re-enable them before returning.
- *
- * For Pro, we rely on both interrupts being in the same word to update
- * them atomically so we never have one enabled and one disabled.
- */
-
-#if CHIP_HAS_SPLIT_INTR_MASK()
-# if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32
-# error Fix assumptions about which word PERF_COUNT interrupts are in
-# endif
-#endif
-
-static inline unsigned long long pmc_mask(void)
-{
- unsigned long long mask = 1ULL << INT_PERF_COUNT;
- mask |= 1ULL << INT_AUX_PERF_COUNT;
- return mask;
-}
-
-void unmask_pmc_interrupts(void)
-{
- interrupt_mask_reset_mask(pmc_mask());
-}
-
-void mask_pmc_interrupts(void)
-{
- interrupt_mask_set_mask(pmc_mask());
-}
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
deleted file mode 100644
index 7983e9868df6..000000000000
--- a/arch/tile/kernel/proc.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/smp.h>
-#include <linux/seq_file.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/timex.h>
-#include <linux/delay.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/sysctl.h>
-#include <linux/hardirq.h>
-#include <linux/hugetlb.h>
-#include <linux/mman.h>
-#include <asm/unaligned.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/sections.h>
-#include <asm/homecache.h>
-#include <asm/hardwall.h>
-#include <arch/chip.h>
-
-
-/*
- * Support /proc/cpuinfo
- */
-
-#define cpu_to_ptr(n) ((void *)((long)(n)+1))
-#define ptr_to_cpu(p) ((long)(p) - 1)
-
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
- int n = ptr_to_cpu(v);
-
- if (n == 0) {
- seq_printf(m, "cpu count\t: %d\n", num_online_cpus());
- seq_printf(m, "cpu list\t: %*pbl\n",
- cpumask_pr_args(cpu_online_mask));
- seq_printf(m, "model name\t: %s\n", chip_model);
- seq_printf(m, "flags\t\t:\n"); /* nothing for now */
- seq_printf(m, "cpu MHz\t\t: %llu.%06llu\n",
- get_clock_rate() / 1000000,
- (get_clock_rate() % 1000000));
- seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
- loops_per_jiffy/(500000/HZ),
- (loops_per_jiffy/(5000/HZ)) % 100);
- }
-
-#ifdef CONFIG_SMP
- if (!cpu_online(n))
- return 0;
-#endif
-
- seq_printf(m, "processor\t: %d\n", n);
-
- /* Print only num_online_cpus() blank lines total. */
- if (cpumask_next(n, cpu_online_mask) < nr_cpu_ids)
- seq_printf(m, "\n");
-
- return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
-}
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return c_start(m, pos);
-}
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-const struct seq_operations cpuinfo_op = {
- .start = c_start,
- .next = c_next,
- .stop = c_stop,
- .show = show_cpuinfo,
-};
-
-/*
- * Support /proc/tile directory
- */
-
-static int __init proc_tile_init(void)
-{
- struct proc_dir_entry *root = proc_mkdir("tile", NULL);
- if (root == NULL)
- return 0;
-
- proc_tile_hardwall_init(root);
-
- return 0;
-}
-
-arch_initcall(proc_tile_init);
-
-/*
- * Support /proc/sys/tile directory
- */
-
-static struct ctl_table unaligned_subtable[] = {
- {
- .procname = "enabled",
- .data = &unaligned_fixup,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .procname = "printk",
- .data = &unaligned_printk,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .procname = "count",
- .data = &unaligned_fixup_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {}
-};
-
-static struct ctl_table unaligned_table[] = {
- {
- .procname = "unaligned_fixup",
- .mode = 0555,
- .child = unaligned_subtable
- },
- {}
-};
-
-static struct ctl_path tile_path[] = {
- { .procname = "tile" },
- { }
-};
-
-static int __init proc_sys_tile_init(void)
-{
- register_sysctl_paths(tile_path, unaligned_table);
- return 0;
-}
-
-arch_initcall(proc_sys_tile_init);
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
deleted file mode 100644
index f0a0e18e4dfb..000000000000
--- a/arch/tile/kernel/process.c
+++ /dev/null
@@ -1,659 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task.h>
-#include <linux/sched/task_stack.h>
-#include <linux/preempt.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/kprobes.h>
-#include <linux/elfcore.h>
-#include <linux/tick.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/compat.h>
-#include <linux/nmi.h>
-#include <linux/syscalls.h>
-#include <linux/kernel.h>
-#include <linux/tracehook.h>
-#include <linux/signal.h>
-#include <linux/delay.h>
-#include <linux/context_tracking.h>
-#include <asm/stack.h>
-#include <asm/switch_to.h>
-#include <asm/homecache.h>
-#include <asm/syscalls.h>
-#include <asm/traps.h>
-#include <asm/setup.h>
-#include <linux/uaccess.h>
-#ifdef CONFIG_HARDWALL
-#include <asm/hardwall.h>
-#endif
-#include <arch/chip.h>
-#include <arch/abi.h>
-#include <arch/sim_def.h>
-
-/*
- * Use the (x86-style) "idle=poll" boot option to prefer low wakeup
- * latency when leaving the idle loop over low power while in it,
- * e.g. if we have one thread per core and we want to get threads out
- * of futex waits fast.
- */
-static int __init idle_setup(char *str)
-{
- if (!str)
- return -EINVAL;
-
- if (!strcmp(str, "poll")) {
- pr_info("using polling idle threads\n");
- cpu_idle_poll_ctrl(true);
- return 0;
- } else if (!strcmp(str, "halt")) {
- return 0;
- }
- return -1;
-}
-early_param("idle", idle_setup);
-
-void arch_cpu_idle(void)
-{
- __this_cpu_write(irq_stat.idle_timestamp, jiffies);
- _cpu_idle();
-}
-
-/*
- * Release a thread_info structure
- */
-void arch_release_thread_stack(unsigned long *stack)
-{
- struct thread_info *info = (void *)stack;
- struct single_step_state *step_state = info->step_state;
-
- if (step_state) {
-
- /*
- * FIXME: we don't munmap step_state->buffer
- * because the mm_struct for this process (info->task->mm)
- * has already been zeroed in exit_mm(). Keeping a
- * reference to it here seems like a bad move, so this
- * means we can't munmap() the buffer, and therefore if we
- * ptrace multiple threads in a process, we will slowly
- * leak user memory. (Note that as soon as the last
- * thread in a process dies, we will reclaim all user
- * memory including single-step buffers in the usual way.)
- * We should either assign a kernel VA to this buffer
- * somehow, or we should associate the buffer(s) with the
- * mm itself so we can clean them up that way.
- */
- kfree(step_state);
- }
-}
-
-static void save_arch_state(struct thread_struct *t);
-
-int copy_thread(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct *p)
-{
- struct pt_regs *childregs = task_pt_regs(p);
- unsigned long ksp;
- unsigned long *callee_regs;
-
- /*
- * Set up the stack and stack pointer appropriately for the
- * new child to find itself woken up in __switch_to().
- * The callee-saved registers must be on the stack to be read;
- * the new task will then jump to assembly support to handle
- * calling schedule_tail(), etc., and (for userspace tasks)
- * returning to the context set up in the pt_regs.
- */
- ksp = (unsigned long) childregs;
- ksp -= C_ABI_SAVE_AREA_SIZE; /* interrupt-entry save area */
- ((long *)ksp)[0] = ((long *)ksp)[1] = 0;
- ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
- callee_regs = (unsigned long *)ksp;
- ksp -= C_ABI_SAVE_AREA_SIZE; /* __switch_to() save area */
- ((long *)ksp)[0] = ((long *)ksp)[1] = 0;
- p->thread.ksp = ksp;
-
- /* Record the pid of the task that created this one. */
- p->thread.creator_pid = current->pid;
-
- if (unlikely(p->flags & PF_KTHREAD)) {
- /* kernel thread */
- memset(childregs, 0, sizeof(struct pt_regs));
- memset(&callee_regs[2], 0,
- (CALLEE_SAVED_REGS_COUNT - 2) * sizeof(unsigned long));
- callee_regs[0] = sp; /* r30 = function */
- callee_regs[1] = arg; /* r31 = arg */
- p->thread.pc = (unsigned long) ret_from_kernel_thread;
- return 0;
- }
-
- /*
-	 * Start the new thread in ret_from_fork so it schedules properly
-	 * and then returns from interrupt like the parent.
- */
- p->thread.pc = (unsigned long) ret_from_fork;
-
- /*
- * Do not clone step state from the parent; each thread
- * must make its own lazily.
- */
- task_thread_info(p)->step_state = NULL;
-
-#ifdef __tilegx__
- /*
- * Do not clone unalign jit fixup from the parent; each thread
- * must allocate its own on demand.
- */
- task_thread_info(p)->unalign_jit_base = NULL;
-#endif
-
- /*
- * Copy the registers onto the kernel stack so the
-	 * return-from-interrupt code will reload them into registers.
- */
- *childregs = *current_pt_regs();
- childregs->regs[0] = 0; /* return value is zero */
- if (sp)
- childregs->sp = sp; /* override with new user stack pointer */
- memcpy(callee_regs, &childregs->regs[CALLEE_SAVED_FIRST_REG],
- CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
-
- /* Save user stack top pointer so we can ID the stack vm area later. */
- p->thread.usp0 = childregs->sp;
-
- /*
- * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
- * which is passed in as arg #5 to sys_clone().
- */
- if (clone_flags & CLONE_SETTLS)
- childregs->tp = childregs->regs[4];
-
-
-#if CHIP_HAS_TILE_DMA()
- /*
- * No DMA in the new thread. We model this on the fact that
- * fork() clears the pending signals, alarms, and aio for the child.
- */
- memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
- memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
-#endif
-
- /* New thread has its miscellaneous processor state bits clear. */
- p->thread.proc_status = 0;
-
-#ifdef CONFIG_HARDWALL
- /* New thread does not own any networks. */
- memset(&p->thread.hardwall[0], 0,
- sizeof(struct hardwall_task) * HARDWALL_TYPES);
-#endif
-
-
- /*
- * Start the new thread with the current architecture state
- * (user interrupt masks, etc.).
- */
- save_arch_state(&p->thread);
-
- return 0;
-}
-
-int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
-{
- task_thread_info(tsk)->align_ctl = val;
- return 0;
-}
-
-int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
-{
- return put_user(task_thread_info(tsk)->align_ctl,
- (unsigned int __user *)adr);
-}
-
-static struct task_struct corrupt_current = { .comm = "<corrupt>" };
-
-/*
- * Return "current" if it looks plausible, or else a pointer to a dummy.
- * This can be helpful if we are just trying to emit a clean panic.
- */
-struct task_struct *validate_current(void)
-{
- struct task_struct *tsk = current;
- if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
- (high_memory && (void *)tsk > high_memory) ||
- ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
- pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
- tsk = &corrupt_current;
- }
- return tsk;
-}
-
-/* Take and return the pointer to the previous task, for schedule_tail(). */
-struct task_struct *sim_notify_fork(struct task_struct *prev)
-{
- struct task_struct *tsk = current;
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
- (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
- (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
- return prev;
-}
-
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
- struct pt_regs *ptregs = task_pt_regs(tsk);
- elf_core_copy_regs(regs, ptregs);
- return 1;
-}
-
-#if CHIP_HAS_TILE_DMA()
-
-/* Allow user processes to access the DMA SPRs */
-void grant_dma_mpls(void)
-{
-#if CONFIG_KERNEL_PL == 2
- __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
- __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
-#else
- __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
- __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
-#endif
-}
-
-/* Forbid user processes from accessing the DMA SPRs */
-void restrict_dma_mpls(void)
-{
-#if CONFIG_KERNEL_PL == 2
- __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
- __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
-#else
- __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
- __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
-#endif
-}
-
-/* Pause the DMA engine, then save off its state registers. */
-static void save_tile_dma_state(struct tile_dma_state *dma)
-{
- unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
- unsigned long post_suspend_state;
-
- /* If we're running, suspend the engine. */
- if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
- __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
-
- /*
- * Wait for the engine to idle, then save regs. Note that we
- * want to record the "running" bit from before suspension,
- * and the "done" bit from after, so that we can properly
- * distinguish a case where the user suspended the engine from
- * the case where the kernel suspended as part of the context
- * swap.
- */
- do {
- post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
- } while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);
-
- dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
- dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
- dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
- dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
- dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
- dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
- dma->byte = __insn_mfspr(SPR_DMA_BYTE);
- dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
- (post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
-}
-
-/* Restart a DMA that was running before we were context-switched out. */
-static void restore_tile_dma_state(struct thread_struct *t)
-{
- const struct tile_dma_state *dma = &t->tile_dma_state;
-
- /*
- * The only way to restore the done bit is to run a zero
- * length transaction.
- */
- if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
- !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
- __insn_mtspr(SPR_DMA_BYTE, 0);
- __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
- while (__insn_mfspr(SPR_DMA_USER_STATUS) &
- SPR_DMA_STATUS__BUSY_MASK)
- ;
- }
-
- __insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
- __insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
- __insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
- __insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
- __insn_mtspr(SPR_DMA_STRIDE, dma->strides);
- __insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
- __insn_mtspr(SPR_DMA_BYTE, dma->byte);
-
- /*
- * Restart the engine if we were running and not done.
- * Clear a pending async DMA fault that we were waiting on return
- * to user space to execute, since we expect the DMA engine
- * to regenerate those faults for us now. Note that we don't
- * try to clear the TIF_ASYNC_TLB flag, since it's relatively
- * harmless if set, and it covers both DMA and the SN processor.
- */
- if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
- t->dma_async_tlb.fault_num = 0;
- __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
- }
-}
-
-#endif
-
-static void save_arch_state(struct thread_struct *t)
-{
-#if CHIP_HAS_SPLIT_INTR_MASK()
- t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
- ((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
-#else
- t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
-#endif
- t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
- t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
- t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
- t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
- t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
- t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
- t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
- t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
-#if !CHIP_HAS_FIXED_INTVEC_BASE()
- t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
-#endif
- t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
-#if CHIP_HAS_DSTREAM_PF()
- t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
-#endif
-}
-
-static void restore_arch_state(const struct thread_struct *t)
-{
-#if CHIP_HAS_SPLIT_INTR_MASK()
- __insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
- __insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
-#else
- __insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
-#endif
- __insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
- __insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
- __insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
- __insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
- __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
- __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
- __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
- __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
-#if !CHIP_HAS_FIXED_INTVEC_BASE()
- __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
-#endif
- __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
-#if CHIP_HAS_DSTREAM_PF()
- __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
-#endif
-}
-
-
-void _prepare_arch_switch(struct task_struct *next)
-{
-#if CHIP_HAS_TILE_DMA()
- struct tile_dma_state *dma = &current->thread.tile_dma_state;
- if (dma->enabled)
- save_tile_dma_state(dma);
-#endif
-}
-
-
-struct task_struct *__sched _switch_to(struct task_struct *prev,
- struct task_struct *next)
-{
- /* DMA state is already saved; save off other arch state. */
- save_arch_state(&prev->thread);
-
-#if CHIP_HAS_TILE_DMA()
- /*
- * Restore DMA in new task if desired.
- * Note that it is only safe to restart here since interrupts
- * are disabled, so we can't take any DMATLB miss or access
- * interrupts before we have finished switching stacks.
- */
- if (next->thread.tile_dma_state.enabled) {
- restore_tile_dma_state(&next->thread);
- grant_dma_mpls();
- } else {
- restrict_dma_mpls();
- }
-#endif
-
- /* Restore other arch state. */
- restore_arch_state(&next->thread);
-
-#ifdef CONFIG_HARDWALL
- /* Enable or disable access to the network registers appropriately. */
- hardwall_switch_tasks(prev, next);
-#endif
-
- /* Notify the simulator of task exit. */
- if (unlikely(prev->state == TASK_DEAD))
- __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
- (prev->pid << _SIM_CONTROL_OPERATOR_BITS));
-
- /*
- * Switch kernel SP, PC, and callee-saved registers.
- * In the context of the new task, return the old task pointer
- * (i.e. the task that actually called __switch_to).
- * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
- */
- return __switch_to(prev, next, next_current_ksp0(next));
-}
-
-/*
- * This routine is called on return from interrupt if any of the
- * TIF_ALLWORK_MASK flags are set in thread_info->flags. It is
- * entered with interrupts disabled so we don't miss an event that
- * modified the thread_info flags. We loop until all the tested flags
- * are clear. Note that the function is also called on certain
- * conditions that are not listed in the loop condition here (e.g.
- * SINGLESTEP), which guarantees we will handle those things once,
- * and redo them if any of the other work items is re-done, but will
- * not keep looping once all the other work is done.
- */
-void prepare_exit_to_usermode(struct pt_regs *regs, u32 thread_info_flags)
-{
- if (WARN_ON(!user_mode(regs)))
- return;
-
- do {
- local_irq_enable();
-
- if (thread_info_flags & _TIF_NEED_RESCHED)
- schedule();
-
-#if CHIP_HAS_TILE_DMA()
- if (thread_info_flags & _TIF_ASYNC_TLB)
- do_async_page_fault(regs);
-#endif
-
- if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs);
-
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- }
-
- local_irq_disable();
- thread_info_flags = READ_ONCE(current_thread_info()->flags);
-
- } while (thread_info_flags & _TIF_WORK_MASK);
-
- if (thread_info_flags & _TIF_SINGLESTEP) {
- single_step_once(regs);
-#ifndef __tilegx__
- /*
- * FIXME: on tilepro, since we enable interrupts in
- * this routine, it's possible that we miss a signal
- * or other asynchronous event.
- */
- local_irq_disable();
-#endif
- }
-
- user_enter();
-}
-
-unsigned long get_wchan(struct task_struct *p)
-{
- struct KBacktraceIterator kbt;
-
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
-
- for (KBacktraceIterator_init(&kbt, p, NULL);
- !KBacktraceIterator_end(&kbt);
- KBacktraceIterator_next(&kbt)) {
- if (!in_sched_functions(kbt.it.pc))
- return kbt.it.pc;
- }
-
- return 0;
-}
-
-/* Flush thread state. */
-void flush_thread(void)
-{
- /* Nothing */
-}
-
-/*
- * Free current thread data structures, etc.
- */
-void exit_thread(struct task_struct *tsk)
-{
-#ifdef CONFIG_HARDWALL
- /*
- * Remove the task from the list of tasks that are associated
- * with any live hardwalls. (If the task that is exiting held
- * the last reference to a hardwall fd, it would already have
- * been released and deactivated at this point.)
- */
- hardwall_deactivate_all(tsk);
-#endif
-}
-
-void tile_show_regs(struct pt_regs *regs)
-{
- int i;
-#ifdef __tilegx__
- for (i = 0; i < 17; i++)
- pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
- i, regs->regs[i], i+18, regs->regs[i+18],
- i+36, regs->regs[i+36]);
- pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
- regs->regs[17], regs->regs[35], regs->tp);
- pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
-#else
- for (i = 0; i < 13; i++)
- pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
- " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
- i, regs->regs[i], i+14, regs->regs[i+14],
- i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
- pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
- regs->regs[13], regs->tp, regs->sp, regs->lr);
-#endif
- pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld flags:%s%s%s%s\n",
- regs->pc, regs->ex1, regs->faultnum,
- is_compat_task() ? " compat" : "",
- (regs->flags & PT_FLAGS_DISABLE_IRQ) ? " noirq" : "",
- !(regs->flags & PT_FLAGS_CALLER_SAVES) ? " nocallersave" : "",
- (regs->flags & PT_FLAGS_RESTORE_REGS) ? " restoreregs" : "");
-}
-
-void show_regs(struct pt_regs *regs)
-{
- struct KBacktraceIterator kbt;
-
- show_regs_print_info(KERN_DEFAULT);
- tile_show_regs(regs);
-
- KBacktraceIterator_init(&kbt, NULL, regs);
- tile_show_stack(&kbt);
-}
-
-#ifdef __tilegx__
-void nmi_raise_cpu_backtrace(struct cpumask *in_mask)
-{
- struct cpumask mask;
- HV_Coord tile;
- unsigned int timeout;
- int cpu;
- HV_NMI_Info info[NR_CPUS];
-
- /* Tentatively dump stack on remote tiles via NMI. */
- timeout = 100;
- cpumask_copy(&mask, in_mask);
- while (!cpumask_empty(&mask) && timeout) {
- for_each_cpu(cpu, &mask) {
- tile.x = cpu_x(cpu);
- tile.y = cpu_y(cpu);
- info[cpu] = hv_send_nmi(tile, TILE_NMI_DUMP_STACK, 0);
- if (info[cpu].result == HV_NMI_RESULT_OK)
- cpumask_clear_cpu(cpu, &mask);
- }
-
- mdelay(10);
- touch_softlockup_watchdog();
- timeout--;
- }
-
- /* Warn about cpus stuck in ICS. */
- if (!cpumask_empty(&mask)) {
- for_each_cpu(cpu, &mask) {
-
- /* Clear the bit as if nmi_cpu_backtrace() ran. */
- cpumask_clear_cpu(cpu, in_mask);
-
- switch (info[cpu].result) {
- case HV_NMI_RESULT_FAIL_ICS:
- pr_warn("Skipping stack dump of cpu %d in ICS at pc %#llx\n",
- cpu, info[cpu].pc);
- break;
- case HV_NMI_RESULT_FAIL_HV:
- pr_warn("Skipping stack dump of cpu %d in hypervisor\n",
- cpu);
- break;
- case HV_ENOSYS:
- WARN_ONCE(1, "Hypervisor too old to allow remote stack dumps.\n");
- break;
- default: /* should not happen */
- pr_warn("Skipping stack dump of cpu %d [%d,%#llx]\n",
- cpu, info[cpu].result, info[cpu].pc);
- break;
- }
- }
- }
-}
-
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
-{
- nmi_trigger_cpumask_backtrace(mask, exclude_self,
- nmi_raise_cpu_backtrace);
-}
-#endif /* __tilegx__ */
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
deleted file mode 100644
index d516d61751c2..000000000000
--- a/arch/tile/kernel/ptrace.c
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Copied from i386: Ross Biro 1/23/92
- */
-
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-#include <linux/kprobes.h>
-#include <linux/compat.h>
-#include <linux/uaccess.h>
-#include <linux/regset.h>
-#include <linux/elf.h>
-#include <linux/tracehook.h>
-#include <linux/context_tracking.h>
-#include <linux/sched/task_stack.h>
-
-#include <asm/traps.h>
-#include <arch/chip.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/syscalls.h>
-
-void user_enable_single_step(struct task_struct *child)
-{
- set_tsk_thread_flag(child, TIF_SINGLESTEP);
-}
-
-void user_disable_single_step(struct task_struct *child)
-{
- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-}
-
-/*
- * Called by kernel/ptrace.c when detaching.
- */
-void ptrace_disable(struct task_struct *child)
-{
- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-
- /*
-	 * This flag is currently unused, but will be set by arch_ptrace()
-	 * and used in the syscall assembly when we do support it.
- */
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-}
-
-/*
- * Get registers from task and ready the result for userspace.
- * Note that we localize the API issues to getregs() and putregs() at
- * some cost in performance, e.g. we need a full pt_regs copy for
- * PEEKUSR, and two copies for POKEUSR. But in general we expect
- * GETREGS/PUTREGS to be the API of choice anyway.
- */
-static char *getregs(struct task_struct *child, struct pt_regs *uregs)
-{
- *uregs = *task_pt_regs(child);
-
- /* Set up flags ABI bits. */
- uregs->flags = 0;
-#ifdef CONFIG_COMPAT
- if (task_thread_info(child)->status & TS_COMPAT)
- uregs->flags |= PT_FLAGS_COMPAT;
-#endif
-
- return (char *)uregs;
-}
-
-/* Put registers back to task. */
-static void putregs(struct task_struct *child, struct pt_regs *uregs)
-{
- struct pt_regs *regs = task_pt_regs(child);
-
- /* Don't allow overwriting the kernel-internal flags word. */
- uregs->flags = regs->flags;
-
- /* Only allow setting the ICS bit in the ex1 word. */
- uregs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(uregs->ex1));
-
- *regs = *uregs;
-}
-
-enum tile_regset {
- REGSET_GPR,
-};
-
-static int tile_gpr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- struct pt_regs regs;
-
- getregs(target, &regs);
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &regs, 0,
- sizeof(regs));
-}
-
-static int tile_gpr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
- struct pt_regs regs = *task_pt_regs(target);
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
- sizeof(regs));
- if (ret)
- return ret;
-
- putregs(target, &regs);
-
- return 0;
-}
-
-static const struct user_regset tile_user_regset[] = {
- [REGSET_GPR] = {
- .core_note_type = NT_PRSTATUS,
- .n = ELF_NGREG,
- .size = sizeof(elf_greg_t),
- .align = sizeof(elf_greg_t),
- .get = tile_gpr_get,
- .set = tile_gpr_set,
- },
-};
-
-static const struct user_regset_view tile_user_regset_view = {
- .name = CHIP_ARCH_NAME,
- .e_machine = ELF_ARCH,
- .ei_osabi = ELF_OSABI,
- .regsets = tile_user_regset,
- .n = ARRAY_SIZE(tile_user_regset),
-};
-
-const struct user_regset_view *task_user_regset_view(struct task_struct *task)
-{
- return &tile_user_regset_view;
-}
-
-long arch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
-{
- unsigned long __user *datap = (long __user __force *)data;
- unsigned long tmp;
- long ret = -EIO;
- char *childreg;
- struct pt_regs copyregs;
-
- switch (request) {
-
- case PTRACE_PEEKUSR: /* Read register from pt_regs. */
- if (addr >= PTREGS_SIZE)
- break;
- childreg = getregs(child, &copyregs) + addr;
-#ifdef CONFIG_COMPAT
- if (is_compat_task()) {
- if (addr & (sizeof(compat_long_t)-1))
- break;
- ret = put_user(*(compat_long_t *)childreg,
- (compat_long_t __user *)datap);
- } else
-#endif
- {
- if (addr & (sizeof(long)-1))
- break;
- ret = put_user(*(long *)childreg, datap);
- }
- break;
-
- case PTRACE_POKEUSR: /* Write register in pt_regs. */
- if (addr >= PTREGS_SIZE)
- break;
- childreg = getregs(child, &copyregs) + addr;
-#ifdef CONFIG_COMPAT
- if (is_compat_task()) {
- if (addr & (sizeof(compat_long_t)-1))
- break;
- *(compat_long_t *)childreg = data;
- } else
-#endif
- {
- if (addr & (sizeof(long)-1))
- break;
- *(long *)childreg = data;
- }
- putregs(child, &copyregs);
- ret = 0;
- break;
-
- case PTRACE_GETREGS: /* Get all registers from the child. */
- ret = copy_regset_to_user(child, &tile_user_regset_view,
- REGSET_GPR, 0,
- sizeof(struct pt_regs), datap);
- break;
-
- case PTRACE_SETREGS: /* Set all registers in the child. */
- ret = copy_regset_from_user(child, &tile_user_regset_view,
- REGSET_GPR, 0,
- sizeof(struct pt_regs), datap);
- break;
-
- case PTRACE_GETFPREGS: /* Get the child FPU state. */
- case PTRACE_SETFPREGS: /* Set the child FPU state. */
- break;
-
- case PTRACE_SETOPTIONS:
- /* Support TILE-specific ptrace options. */
- BUILD_BUG_ON(PTRACE_O_MASK_TILE & PTRACE_O_MASK);
- tmp = data & PTRACE_O_MASK_TILE;
- data &= ~PTRACE_O_MASK_TILE;
- ret = ptrace_request(child, request, addr, data);
- if (ret == 0) {
- unsigned int flags = child->ptrace;
- flags &= ~(PTRACE_O_MASK_TILE << PT_OPT_FLAG_SHIFT);
- flags |= (tmp << PT_OPT_FLAG_SHIFT);
- child->ptrace = flags;
- }
- break;
-
- default:
-#ifdef CONFIG_COMPAT
- if (task_thread_info(current)->status & TS_COMPAT) {
- ret = compat_ptrace_request(child, request,
- addr, data);
- break;
- }
-#endif
- ret = ptrace_request(child, request, addr, data);
- break;
- }
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-/* Not used; we handle compat issues in arch_ptrace() directly. */
-long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
- compat_ulong_t addr, compat_ulong_t data)
-{
- BUG();
-}
-#endif
-
-int do_syscall_trace_enter(struct pt_regs *regs)
-{
- u32 work = READ_ONCE(current_thread_info()->flags);
-
- if ((work & _TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs)) {
- regs->regs[TREG_SYSCALL_NR] = -1;
- return -1;
- }
-
- if (secure_computing(NULL) == -1)
- return -1;
-
- if (work & _TIF_SYSCALL_TRACEPOINT)
- trace_sys_enter(regs, regs->regs[TREG_SYSCALL_NR]);
-
- return regs->regs[TREG_SYSCALL_NR];
-}
-
-void do_syscall_trace_exit(struct pt_regs *regs)
-{
- long errno;
-
- /*
- * The standard tile calling convention returns the value (or negative
- * errno) in r0, and zero (or positive errno) in r1.
- * It saves a couple of cycles on the hot path to do this work in
- * registers only as we return, rather than updating the in-memory
-	 * struct pt_regs.
- */
- errno = (long) regs->regs[0];
- if (errno < 0 && errno > -4096)
- regs->regs[1] = -errno;
- else
- regs->regs[1] = 0;
-
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, 0);
-
- if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
- trace_sys_exit(regs, regs->regs[0]);
-}
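
The r0/r1 convention described in the comment above is easy to capture in isolation; this sketch mirrors the errno split, treating values in (-4096, 0) as errnos and everything else as plain return values:

#include <assert.h>

/* Mirror of the r0/r1 split performed in do_syscall_trace_exit(). */
static void tile_syscall_ret(long r0, long *r1)
{
	*r1 = (r0 < 0 && r0 > -4096) ? -r0 : 0;
}

int main(void)
{
	long r1;

	tile_syscall_ret(-22, &r1);	/* -EINVAL */
	assert(r1 == 22);
	tile_syscall_ret(3, &r1);	/* success, e.g. a new fd */
	assert(r1 == 0);
	return 0;
}
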
-
-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs)
-{
- struct siginfo info;
-
- memset(&info, 0, sizeof(info));
- info.si_signo = SIGTRAP;
- info.si_code = TRAP_BRKPT;
- info.si_addr = (void __user *) regs->pc;
-
- /* Send us the fakey SIGTRAP */
- force_sig_info(SIGTRAP, &info, tsk);
-}
-
-/* Handle synthetic interrupt delivered only by the simulator. */
-void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
-{
- send_sigtrap(current, regs);
-}
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
deleted file mode 100644
index 6c5d2c070a12..000000000000
--- a/arch/tile/kernel/reboot.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/stddef.h>
-#include <linux/reboot.h>
-#include <linux/smp.h>
-#include <linux/pm.h>
-#include <linux/export.h>
-#include <asm/page.h>
-#include <asm/setup.h>
-#include <hv/hypervisor.h>
-
-#ifndef CONFIG_SMP
-#define smp_send_stop()
-#endif
-
-void machine_halt(void)
-{
- arch_local_irq_disable_all();
- smp_send_stop();
- hv_halt();
-}
-
-void machine_power_off(void)
-{
- arch_local_irq_disable_all();
- smp_send_stop();
- hv_power_off();
-}
-
-void machine_restart(char *cmd)
-{
- arch_local_irq_disable_all();
- smp_send_stop();
- hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
-}
-
-/* No interesting distinction to be made here. */
-void (*pm_power_off)(void) = NULL;
-EXPORT_SYMBOL(pm_power_off);
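
All three reboot entry points above share the same quiesce-then-call-hypervisor shape; only the final hv_*() call differs (machine_restart() additionally passes the kernel name and command line to hv_restart()). A hedged sketch of the common pattern; the helper name is hypothetical, while arch_local_irq_disable_all() and smp_send_stop() are the calls used above:

/*
 * Sketch: the sequence common to machine_halt(), machine_power_off()
 * and machine_restart() above. The helper and its parameter are
 * hypothetical; the hypervisor call is supplied by the caller.
 */
static void quiesce_then(void (*hv_call)(void))
{
	arch_local_irq_disable_all();	/* no more interrupts on this tile */
	smp_send_stop();		/* park all the other tiles */
	hv_call();			/* hand control back to the hypervisor */
}
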
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S
deleted file mode 100644
index 542cae17a93a..000000000000
--- a/arch/tile/kernel/regs_32.S
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/asm-offsets.h>
-#include <arch/spr_def.h>
-#include <asm/processor.h>
-#include <asm/switch_to.h>
-
-/*
- * See <asm/switch_to.h>; called with prev and next task_struct pointers.
- * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
- *
- * We want to save pc/sp in "prev", and get the new pc/sp from "next".
- * We also need to save all the callee-saved registers on the stack.
- *
- * Intel enables/disables access to the hardware cycle counter in
- * seccomp (secure computing) environments if necessary, based on
- * has_secure_computing(). We might want to do this at some point,
- * though it would require virtualizing the other SPRs under WORLD_ACCESS.
- *
- * Since we're saving to the stack, we omit sp from this list.
- * And for parallels with other architectures, we save lr separately,
- * in the thread_struct itself (as the "pc" field).
- *
- * This code also needs to be aligned with process.c copy_thread()
- */
-
-#if CALLEE_SAVED_REGS_COUNT != 24
-# error Mismatch between <asm/switch_to.h> and kernel/entry.S
-#endif
-#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4)
-
-#define SAVE_REG(r) { sw r12, r; addi r12, r12, 4 }
-#define LOAD_REG(r) { lw r, r12; addi r12, r12, 4 }
-#define FOR_EACH_CALLEE_SAVED_REG(f) \
- f(r30); f(r31); \
- f(r32); f(r33); f(r34); f(r35); f(r36); f(r37); f(r38); f(r39); \
- f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \
- f(r48); f(r49); f(r50); f(r51); f(r52);
-
-STD_ENTRY_SECTION(__switch_to, .sched.text)
- {
- move r10, sp
- sw sp, lr
- addi sp, sp, -FRAME_SIZE
- }
- {
- addi r11, sp, 4
- addi r12, sp, 8
- }
- {
- sw r11, r10
- addli r4, r1, TASK_STRUCT_THREAD_KSP_OFFSET
- }
- {
- lw r13, r4 /* Load new sp to a temp register early. */
- addli r3, r0, TASK_STRUCT_THREAD_KSP_OFFSET
- }
- FOR_EACH_CALLEE_SAVED_REG(SAVE_REG)
- {
- sw r3, sp
- addli r3, r0, TASK_STRUCT_THREAD_PC_OFFSET
- }
- {
- sw r3, lr
- addli r4, r1, TASK_STRUCT_THREAD_PC_OFFSET
- }
- {
- lw lr, r4
- addi r12, r13, 8
- }
- {
- /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
- move sp, r13
- mtspr SPR_SYSTEM_SAVE_K_0, r2
- }
- FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
-.L__switch_to_pc:
- {
- addi sp, sp, FRAME_SIZE
- jrp lr /* r0 is still valid here, so return it */
- }
- STD_ENDPROC(__switch_to)
-
-/* Return a suitable address for the backtracer for suspended threads */
-STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
- lnk r0
- {
- addli r0, r0, .L__switch_to_pc - .
- jrp lr
- }
- STD_ENDPROC(get_switch_to_pc)
-
-STD_ENTRY(get_pt_regs)
- .irp reg, r0, r1, r2, r3, r4, r5, r6, r7, \
- r8, r9, r10, r11, r12, r13, r14, r15, \
- r16, r17, r18, r19, r20, r21, r22, r23, \
- r24, r25, r26, r27, r28, r29, r30, r31, \
- r32, r33, r34, r35, r36, r37, r38, r39, \
- r40, r41, r42, r43, r44, r45, r46, r47, \
- r48, r49, r50, r51, r52, tp, sp
- {
- sw r0, \reg
- addi r0, r0, 4
- }
- .endr
- {
- sw r0, lr
- addi r0, r0, PTREGS_OFFSET_PC - PTREGS_OFFSET_LR
- }
- lnk r1
- {
- sw r0, r1
- addi r0, r0, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
- }
- mfspr r1, INTERRUPT_CRITICAL_SECTION
- shli r1, r1, SPR_EX_CONTEXT_1_1__ICS_SHIFT
- ori r1, r1, KERNEL_PL
- {
- sw r0, r1
- addi r0, r0, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
- }
- {
- sw r0, zero /* clear faultnum */
- addi r0, r0, PTREGS_OFFSET_ORIG_R0 - PTREGS_OFFSET_FAULTNUM
- }
- {
- sw r0, zero /* clear orig_r0 */
- addli r0, r0, -PTREGS_OFFSET_ORIG_R0 /* restore r0 to base */
- }
- jrp lr
- STD_ENDPROC(get_pt_regs)
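
The SAVE_REG/LOAD_REG offsets in __switch_to above are easier to follow as a frame layout. Below is a hedged sketch of the 32-bit switch frame, inferred from FRAME_SIZE and the "addi r11, sp, 4" / "addi r12, sp, 8" setup; the struct name is hypothetical, and nothing like it exists in the removed code. Note that lr is stored through the pre-decrement sp, so it lands just above this frame:

#include <stdint.h>

/*
 * Sketch (hypothetical struct): the frame __switch_to builds at the
 * new sp, inferred from FRAME_SIZE and the r11/r12 setup above.
 */
struct switch_frame_32 {
	uint32_t reserved;	/* sp+0: ABI-reserved slot */
	uint32_t caller_sp;	/* sp+4: old sp, stored via "sw r11, r10" */
	uint32_t regs[24];	/* sp+8 onward: r30..r52 via SAVE_REG */
};
/* sizeof(struct switch_frame_32) == FRAME_SIZE == (2 + 24) * 4 == 104 */
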
diff --git a/arch/tile/kernel/regs_64.S b/arch/tile/kernel/regs_64.S
deleted file mode 100644
index bbffcc6f340f..000000000000
--- a/arch/tile/kernel/regs_64.S
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/asm-offsets.h>
-#include <arch/spr_def.h>
-#include <asm/processor.h>
-#include <asm/switch_to.h>
-
-/*
- * See <asm/switch_to.h>; called with prev and next task_struct pointers.
- * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
- *
- * We want to save pc/sp in "prev", and get the new pc/sp from "next".
- * We also need to save all the callee-saved registers on the stack.
- *
- * Intel enables/disables access to the hardware cycle counter in
- * seccomp (secure computing) environments if necessary, based on
- * has_secure_computing(). We might want to do this at some point,
- * though it would require virtualizing the other SPRs under WORLD_ACCESS.
- *
- * Since we're saving to the stack, we omit sp from this list.
- * And for parallels with other architectures, we save lr separately,
- * in the thread_struct itself (as the "pc" field).
- *
- * This code also needs to be aligned with process.c copy_thread()
- */
-
-#if CALLEE_SAVED_REGS_COUNT != 24
-# error Mismatch between <asm/switch_to.h> and kernel/entry.S
-#endif
-#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 8)
-
-#define SAVE_REG(r) { st r12, r; addi r12, r12, 8 }
-#define LOAD_REG(r) { ld r, r12; addi r12, r12, 8 }
-#define FOR_EACH_CALLEE_SAVED_REG(f) \
- f(r30); f(r31); \
- f(r32); f(r33); f(r34); f(r35); f(r36); f(r37); f(r38); f(r39); \
- f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \
- f(r48); f(r49); f(r50); f(r51); f(r52);
-
-STD_ENTRY_SECTION(__switch_to, .sched.text)
- {
- move r10, sp
- st sp, lr
- }
- {
- addli r11, sp, -FRAME_SIZE + 8
- addli sp, sp, -FRAME_SIZE
- }
- {
- st r11, r10
- addli r4, r1, TASK_STRUCT_THREAD_KSP_OFFSET
- }
- {
- ld r13, r4 /* Load new sp to a temp register early. */
- addi r12, sp, 16
- }
- FOR_EACH_CALLEE_SAVED_REG(SAVE_REG)
- addli r3, r0, TASK_STRUCT_THREAD_KSP_OFFSET
- {
- st r3, sp
- addli r3, r0, TASK_STRUCT_THREAD_PC_OFFSET
- }
- {
- st r3, lr
- addli r4, r1, TASK_STRUCT_THREAD_PC_OFFSET
- }
- {
- ld lr, r4
- addi r12, r13, 16
- }
- {
- /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
- move sp, r13
- mtspr SPR_SYSTEM_SAVE_K_0, r2
- }
- FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
-.L__switch_to_pc:
- {
- addli sp, sp, FRAME_SIZE
- jrp lr /* r0 is still valid here, so return it */
- }
- STD_ENDPROC(__switch_to)
-
-/* Return a suitable address for the backtracer for suspended threads */
-STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
- lnk r0
- {
- addli r0, r0, .L__switch_to_pc - .
- jrp lr
- }
- STD_ENDPROC(get_switch_to_pc)
-
-STD_ENTRY(get_pt_regs)
- .irp reg, r0, r1, r2, r3, r4, r5, r6, r7, \
- r8, r9, r10, r11, r12, r13, r14, r15, \
- r16, r17, r18, r19, r20, r21, r22, r23, \
- r24, r25, r26, r27, r28, r29, r30, r31, \
- r32, r33, r34, r35, r36, r37, r38, r39, \
- r40, r41, r42, r43, r44, r45, r46, r47, \
- r48, r49, r50, r51, r52, tp, sp
- {
- st r0, \reg
- addi r0, r0, 8
- }
- .endr
- {
- st r0, lr
- addi r0, r0, PTREGS_OFFSET_PC - PTREGS_OFFSET_LR
- }
- lnk r1
- {
- st r0, r1
- addi r0, r0, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
- }
- mfspr r1, INTERRUPT_CRITICAL_SECTION
- shli r1, r1, SPR_EX_CONTEXT_1_1__ICS_SHIFT
- ori r1, r1, KERNEL_PL
- {
- st r0, r1
- addi r0, r0, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
- }
- {
- st r0, zero /* clear faultnum */
- addi r0, r0, PTREGS_OFFSET_ORIG_R0 - PTREGS_OFFSET_FAULTNUM
- }
- {
- st r0, zero /* clear orig_r0 */
- addli r0, r0, -PTREGS_OFFSET_ORIG_R0 /* restore r0 to base */
- }
- jrp lr
- STD_ENDPROC(get_pt_regs)
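
One non-obvious step in get_pt_regs above (in both the 32-bit and 64-bit versions) is that the snapshot's EX_CONTEXT word is synthesized rather than read from a real trap frame: the current INTERRUPT_CRITICAL_SECTION value is shifted into the ICS field and the kernel protection level is OR'd in. A hedged C rendering of that computation; the function name is hypothetical, and the constants are the ones used above from <arch/spr_def.h>:

/*
 * Sketch: the "ex1" word that get_pt_regs() above synthesizes for the
 * snapshot. 'ics' is the value read from the INTERRUPT_CRITICAL_SECTION
 * SPR; the function name is hypothetical.
 */
static unsigned long synth_ex1(unsigned long ics)
{
	return (ics << SPR_EX_CONTEXT_1_1__ICS_SHIFT) | KERNEL_PL;
}
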
diff --git a/arch/tile/kernel/relocate_kernel_32.S b/arch/tile/kernel/relocate_kernel_32.S
deleted file mode 100644
index e44fbcf8cbd5..000000000000
--- a/arch/tile/kernel/relocate_kernel_32.S
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * copy new kernel into place and then call hv_reexec
- *
- */
-
-#include <linux/linkage.h>
-#include <arch/chip.h>
-#include <asm/page.h>
-#include <hv/hypervisor.h>
-
-#undef RELOCATE_NEW_KERNEL_VERBOSE
-
-STD_ENTRY(relocate_new_kernel)
-
- move r30, r0 /* page list */
- move r31, r1 /* address of page we are on */
- move r32, r2 /* start address of new kernel */
-
- shri r1, r1, PAGE_SHIFT
- addi r1, r1, 1
- shli sp, r1, PAGE_SHIFT
- addi sp, sp, -8
- /* we now have a stack (whether we need one or not) */
-
- moveli r40, lo16(hv_console_putc)
- auli r40, r40, ha16(hv_console_putc)
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'r'
- jalr r40
-
- moveli r0, '_'
- jalr r40
-
- moveli r0, 'n'
- jalr r40
-
- moveli r0, '_'
- jalr r40
-
- moveli r0, 'k'
- jalr r40
-
- moveli r0, '\n'
- jalr r40
-#endif
-
- /*
- * Throughout this code r30 is a pointer to the element of the
- * page list we are working on.
- *
- * Normally we get to the next element of the page list by
- * incrementing r30 by four. The exception is if the element
- * on the page list is an IND_INDIRECTION in which case we use
- * the element with the low bits masked off as the new value
- * of r30.
- *
- * To get this started, we need the value passed to us (which
- * will always be an IND_INDIRECTION) in memory somewhere with
- * r30 pointing at it. To do that, we push the value passed
- * to us on the stack and make r30 point to it.
- */
-
- sw sp, r30
- move r30, sp
- addi sp, sp, -8
-
- /*
- * On TILEPro, we need to flush all tiles' caches, since we may
- * have been doing hash-for-home caching there. Note that we
- * must do this _after_ we're completely done modifying any memory
- * other than our output buffer (which we know is locally cached).
- * We want the caches to be fully clean when we do the reexec,
- * because the hypervisor is going to do this flush again at that
- * point, and we don't want that second flush to overwrite any memory.
- */
- {
- move r0, zero /* cache_pa */
- move r1, zero
- }
- {
- auli r2, zero, ha16(HV_FLUSH_EVICT_L2) /* cache_control */
- movei r3, -1 /* cache_cpumask; -1 means all client tiles */
- }
- {
- move r4, zero /* tlb_va */
- move r5, zero /* tlb_length */
- }
- {
- move r6, zero /* tlb_pgsize */
- move r7, zero /* tlb_cpumask */
- }
- {
- move r8, zero /* asids */
- moveli r20, lo16(hv_flush_remote)
- }
- {
- move r9, zero /* asidcount */
- auli r20, r20, ha16(hv_flush_remote)
- }
-
- jalr r20
-
- /* r33 is destination pointer, default to zero */
-
- moveli r33, 0
-
-.Lloop: lw r10, r30
-
- andi r9, r10, 0xf /* low 4 bits tell us what type it is */
- xor r10, r10, r9 /* r10 is now value with low 4 bits stripped */
-
- seqi r0, r9, 0x1 /* IND_DESTINATION */
- bzt r0, .Ltry2
-
- move r33, r10
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'd'
- jalr r40
-#endif
-
- addi r30, r30, 4
- j .Lloop
-
-.Ltry2:
- seqi r0, r9, 0x2 /* IND_INDIRECTION */
- bzt r0, .Ltry4
-
- move r30, r10
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'i'
- jalr r40
-#endif
-
- j .Lloop
-
-.Ltry4:
- seqi r0, r9, 0x4 /* IND_DONE */
- bzt r0, .Ltry8
-
- mf
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'D'
- jalr r40
- moveli r0, '\n'
- jalr r40
-#endif
-
- move r0, r32
- moveli r1, 0 /* arg to hv_reexec is 64 bits */
-
- moveli r41, lo16(hv_reexec)
- auli r41, r41, ha16(hv_reexec)
-
- jalr r41
-
- /* we should not get here */
-
- moveli r0, '?'
- jalr r40
- moveli r0, '\n'
- jalr r40
-
- j .Lhalt
-
-.Ltry8: seqi r0, r9, 0x8 /* IND_SOURCE */
- bz r0, .Lerr /* unknown type */
-
- /* copy page at r10 to page at r33 */
-
- move r11, r33
-
- moveli r0, lo16(PAGE_SIZE)
- auli r0, r0, ha16(PAGE_SIZE)
- add r33, r33, r0
-
- /* copy word at r10 to word at r11 until r11 equals r33 */
-
- /* We know the page size must be a multiple of 16 words (64 bytes),
- * so we can unroll 16 times safely without any edge-case checking.
- *
- * Issue a flush of the destination every 16 words to avoid
- * incoherence when starting the new kernel. (Now this is
- * just good paranoia because the hv_reexec call will also
- * take care of this.)
- */
-
-1:
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0; addi r11, r11, 4 }
- { lw r0, r10; addi r10, r10, 4 }
- { sw r11, r0 }
- { flush r11 ; addi r11, r11, 4 }
-
- seq r0, r33, r11
- bzt r0, 1b
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 's'
- jalr r40
-#endif
-
- addi r30, r30, 4
- j .Lloop
-
-
-.Lerr: moveli r0, 'e'
- jalr r40
- moveli r0, 'r'
- jalr r40
- moveli r0, 'r'
- jalr r40
- moveli r0, '\n'
- jalr r40
-.Lhalt:
- moveli r41, lo16(hv_halt)
- auli r41, r41, ha16(hv_halt)
-
- jalr r41
- STD_ENDPROC(relocate_new_kernel)
-
- .section .rodata,"a"
-
- .globl relocate_new_kernel_size
-relocate_new_kernel_size:
- .long .Lend_relocate_new_kernel - relocate_new_kernel
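
The .Lloop dispatch above is the standard kexec page-list walk: the low four bits of each entry encode its type, and the remaining bits are a page address. A hedged C rendering of the same state machine; the IND_* tag values match the immediates tested above, while walk_kexec_page_list(), copy_page_and_flush(), reexec() and halt() are hypothetical stand-ins for the assembly paths they name (the unrolled copy loop, hv_reexec() and hv_halt()):

/*
 * Sketch: the page-list interpreter implemented by .Lloop above.
 * All helper names are hypothetical stand-ins.
 */
static void walk_kexec_page_list(unsigned long *entry, unsigned long start)
{
	unsigned long dest = 0;

	for (;;) {
		unsigned long val = *entry;
		unsigned long page = val & ~0xfUL;

		switch (val & 0xf) {
		case 0x1:			/* IND_DESTINATION */
			dest = page;		/* set the copy target */
			entry++;
			break;
		case 0x2:			/* IND_INDIRECTION */
			entry = (unsigned long *)page;	/* chain to next list page */
			break;
		case 0x4:			/* IND_DONE */
			reexec(start);		/* hv_reexec(); never returns */
			return;
		case 0x8:			/* IND_SOURCE */
			copy_page_and_flush(dest, page);  /* the unrolled loop */
			dest += PAGE_SIZE;
			entry++;
			break;
		default:
			halt();			/* the .Lerr path */
		}
	}
}
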
diff --git a/arch/tile/kernel/relocate_kernel_64.S b/arch/tile/kernel/relocate_kernel_64.S
deleted file mode 100644
index d9d8cf6176e8..000000000000
--- a/arch/tile/kernel/relocate_kernel_64.S
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * copy new kernel into place and then call hv_reexec
- *
- */
-
-#include <linux/linkage.h>
-#include <arch/chip.h>
-#include <asm/page.h>
-#include <hv/hypervisor.h>
-
-#undef RELOCATE_NEW_KERNEL_VERBOSE
-
-STD_ENTRY(relocate_new_kernel)
-
- move r30, r0 /* page list */
- move r31, r1 /* address of page we are on */
- move r32, r2 /* start address of new kernel */
-
- shrui r1, r1, PAGE_SHIFT
- addi r1, r1, 1
- shli sp, r1, PAGE_SHIFT
- addi sp, sp, -8
- /* we now have a stack (whether we need one or not) */
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r40, hw2_last(hv_console_putc)
- shl16insli r40, r40, hw1(hv_console_putc)
- shl16insli r40, r40, hw0(hv_console_putc)
-
- moveli r0, 'r'
- jalr r40
-
- moveli r0, '_'
- jalr r40
-
- moveli r0, 'n'
- jalr r40
-
- moveli r0, '_'
- jalr r40
-
- moveli r0, 'k'
- jalr r40
-
- moveli r0, '\n'
- jalr r40
-#endif
-
- /*
- * Throughout this code r30 is a pointer to the element of the
- * page list we are working on.
- *
- * Normally we get to the next element of the page list by
- * incrementing r30 by eight. The exception is if the element
- * on the page list is an IND_INDIRECTION in which case we use
- * the element with the low bits masked off as the new value
- * of r30.
- *
- * To get this started, we need the value passed to us (which
- * will always be an IND_INDIRECTION) in memory somewhere with
- * r30 pointing at it. To do that, we push the value passed
- * to us on the stack and make r30 point to it.
- */
-
- st sp, r30
- move r30, sp
- addi sp, sp, -16
-
- /*
- * On TILE-GX, we need to flush all tiles' caches, since we may
- * have been doing hash-for-home caching there. Note that we
- * must do this _after_ we're completely done modifying any memory
- * other than our output buffer (which we know is locally cached).
- * We want the caches to be fully clean when we do the reexec,
- * because the hypervisor is going to do this flush again at that
- * point, and we don't want that second flush to overwrite any memory.
- */
- {
- move r0, zero /* cache_pa */
- moveli r1, hw2_last(HV_FLUSH_EVICT_L2)
- }
- {
- shl16insli r1, r1, hw1(HV_FLUSH_EVICT_L2)
- movei r2, -1 /* cache_cpumask; -1 means all client tiles */
- }
- {
- shl16insli r1, r1, hw0(HV_FLUSH_EVICT_L2) /* cache_control */
- move r3, zero /* tlb_va */
- }
- {
- move r4, zero /* tlb_length */
- move r5, zero /* tlb_pgsize */
- }
- {
- move r6, zero /* tlb_cpumask */
- move r7, zero /* asids */
- }
- {
- moveli r20, hw2_last(hv_flush_remote)
- move r8, zero /* asidcount */
- }
- shl16insli r20, r20, hw1(hv_flush_remote)
- shl16insli r20, r20, hw0(hv_flush_remote)
-
- jalr r20
-
- /* r33 is destination pointer, default to zero */
-
- moveli r33, 0
-
-.Lloop: ld r10, r30
-
- andi r9, r10, 0xf /* low 4 bits tell us what type it is */
- xor r10, r10, r9 /* r10 is now value with low 4 bits stripped */
-
- cmpeqi r0, r9, 0x1 /* IND_DESTINATION */
- beqzt r0, .Ltry2
-
- move r33, r10
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'd'
- jalr r40
-#endif
-
- addi r30, r30, 8
- j .Lloop
-
-.Ltry2:
- cmpeqi r0, r9, 0x2 /* IND_INDIRECTION */
- beqzt r0, .Ltry4
-
- move r30, r10
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'i'
- jalr r40
-#endif
-
- j .Lloop
-
-.Ltry4:
- cmpeqi r0, r9, 0x4 /* IND_DONE */
- beqzt r0, .Ltry8
-
- mf
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'D'
- jalr r40
- moveli r0, '\n'
- jalr r40
-#endif
-
- move r0, r32
-
- moveli r41, hw2_last(hv_reexec)
- shl16insli r41, r41, hw1(hv_reexec)
- shl16insli r41, r41, hw0(hv_reexec)
-
- jalr r41
-
- /* we should not get here */
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, '?'
- jalr r40
- moveli r0, '\n'
- jalr r40
-#endif
-
- j .Lhalt
-
-.Ltry8: cmpeqi r0, r9, 0x8 /* IND_SOURCE */
- beqz r0, .Lerr /* unknown type */
-
- /* copy page at r10 to page at r33 */
-
- move r11, r33
-
- moveli r0, hw2_last(PAGE_SIZE)
- shl16insli r0, r0, hw1(PAGE_SIZE)
- shl16insli r0, r0, hw0(PAGE_SIZE)
- add r33, r33, r0
-
- /* copy word at r10 to word at r11 until r11 equals r33 */
-
- /* We know the page size must be a multiple of 8 doublewords (64
- * bytes), so we can unroll 8 times safely without any edge-case
- * checking.
- *
- * Issue a flush of the destination every 8 words to avoid
- * incoherence when starting the new kernel. (Now this is
- * just good paranoia because the hv_reexec call will also
- * take care of this.)
- */
-
-1:
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0; addi r11, r11, 8 }
- { ld r0, r10; addi r10, r10, 8 }
- { st r11, r0 }
- { flush r11 ; addi r11, r11, 8 }
-
- cmpeq r0, r33, r11
- beqzt r0, 1b
-
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 's'
- jalr r40
-#endif
-
- addi r30, r30, 8
- j .Lloop
-
-
-.Lerr:
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
- moveli r0, 'e'
- jalr r40
- moveli r0, 'r'
- jalr r40
- moveli r0, 'r'
- jalr r40
- moveli r0, '\n'
- jalr r40
-#endif
-.Lhalt:
- moveli r41, hw2_last(hv_halt)
- shl16insli r41, r41, hw1(hv_halt)
- shl16insli r41, r41, hw0(hv_halt)
-
- jalr r41
- STD_ENDPROC(relocate_new_kernel)
-
- .section .rodata,"a"
-
- .globl relocate_new_kernel_size
-relocate_new_kernel_size:
- .long .Lend_relocate_new_kernel - relocate_new_kernel
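
Compared with the 32-bit version's lo16/ha16 pairs, the 64-bit code above materializes addresses with the three-instruction moveli hw2_last / shl16insli hw1 / shl16insli hw0 idiom. A hedged C model of what that sequence computes; the function name is hypothetical, and since hw2_last is the sign-extending top chunk, 48-bit addresses come out sign-extended to 64 bits:

#include <stdint.h>

/*
 * Sketch: the moveli/shl16insli address-materialization idiom used
 * above, modeled in C. The function name is hypothetical.
 */
static long materialize_addr48(long addr)
{
	long r = (int16_t)(addr >> 32);		 /* moveli hw2_last: bits 47..32, sign-extended */
	r = (r << 16) | ((addr >> 16) & 0xffff); /* shl16insli hw1: insert bits 31..16 */
	r = (r << 16) | (addr & 0xffff);	 /* shl16insli hw0: insert bits 15..0 */
	return r;
}
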
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
deleted file mode 100644
index eb4e198f6f93..000000000000
--- a/arch/tile/kernel/setup.c
+++ /dev/null
@@ -1,1743 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mmzone.h>
-#include <linux/bootmem.h>
-#include <linux/module.h>
-#include <linux/node.h>
-#include <linux/cpu.h>
-#include <linux/ioport.h>
-#include <linux/irq.h>
-#include <linux/kexec.h>
-#include <linux/pci.h>
-#include <linux/swiotlb.h>
-#include <linux/initrd.h>
-#include <linux/io.h>
-#include <linux/highmem.h>
-#include <linux/smp.h>
-#include <linux/timex.h>
-#include <linux/hugetlb.h>
-#include <linux/start_kernel.h>
-#include <linux/screen_info.h>
-#include <linux/tick.h>
-#include <asm/setup.h>
-#include <asm/sections.h>
-#include <asm/cacheflush.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-#include <hv/hypervisor.h>
-#include <arch/interrupts.h>
-
-/* <linux/smp.h> doesn't provide this definition. */
-#ifndef CONFIG_SMP
-#define setup_max_cpus 1
-#endif
-
-static inline int ABS(int x) { return x >= 0 ? x : -x; }
-
-/* Chip information */
-char chip_model[64] __ro_after_init;
-
-#ifdef CONFIG_VT
-struct screen_info screen_info;
-#endif
-
-struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-
-/* Information on the NUMA nodes that we compute early */
-unsigned long node_start_pfn[MAX_NUMNODES];
-unsigned long node_end_pfn[MAX_NUMNODES];
-unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
-unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
-unsigned long __initdata node_free_pfn[MAX_NUMNODES];
-
-static unsigned long __initdata node_percpu[MAX_NUMNODES];
-
-/*
- * per-CPU stack and boot info.
- */
-DEFINE_PER_CPU(unsigned long, boot_sp) =
- (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA;
-
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
-#else
-/*
- * The variable must be __initdata since it references __init code.
- * With CONFIG_SMP it is per-cpu data, which is exempt from validation.
- */
-unsigned long __initdata boot_pc = (unsigned long)start_kernel;
-#endif
-
-#ifdef CONFIG_HIGHMEM
-/* Page frame index of end of lowmem on each controller. */
-unsigned long node_lowmem_end_pfn[MAX_NUMNODES];
-
-/* Number of pages that can be mapped into lowmem. */
-static unsigned long __initdata mappable_physpages;
-#endif
-
-/* Data on which physical memory controller corresponds to which NUMA node */
-int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };
-
-#ifdef CONFIG_HIGHMEM
-/* Map information from VAs to PAs */
-unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
- __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
-EXPORT_SYMBOL(pbase_map);
-
-/* Map information from PAs to VAs */
-void *vbase_map[NR_PA_HIGHBIT_VALUES]
- __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
-EXPORT_SYMBOL(vbase_map);
-#endif
-
-/* Node number as a function of the high PA bits */
-int highbits_to_node[NR_PA_HIGHBIT_VALUES] __ro_after_init;
-EXPORT_SYMBOL(highbits_to_node);
-
-static unsigned int __initdata maxmem_pfn = -1U;
-static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
- [0 ... MAX_NUMNODES-1] = -1U
-};
-static nodemask_t __initdata isolnodes;
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
-enum { DEFAULT_PCI_RESERVE_MB = 64 };
-static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
-unsigned long __initdata pci_reserve_start_pfn = -1U;
-unsigned long __initdata pci_reserve_end_pfn = -1U;
-#endif
-
-static int __init setup_maxmem(char *str)
-{
- unsigned long long maxmem;
- if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
- return -EINVAL;
-
- maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
- pr_info("Forcing RAM used to no more than %dMB\n",
- maxmem_pfn >> (20 - PAGE_SHIFT));
- return 0;
-}
-early_param("maxmem", setup_maxmem);
-
-static int __init setup_maxnodemem(char *str)
-{
- char *endp;
- unsigned long long maxnodemem;
- unsigned long node;
-
- node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
- if (node >= MAX_NUMNODES || *endp != ':')
- return -EINVAL;
-
- maxnodemem = memparse(endp+1, NULL);
- maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
- (HPAGE_SHIFT - PAGE_SHIFT);
- pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
- node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
- return 0;
-}
-early_param("maxnodemem", setup_maxnodemem);
-
-struct memmap_entry {
- u64 addr; /* start of memory segment */
- u64 size; /* size of memory segment */
-};
-static struct memmap_entry memmap_map[64];
-static int memmap_nr;
-
-static void add_memmap_region(u64 addr, u64 size)
-{
- if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
- pr_err("Ooops! Too many entries in the memory map!\n");
- return;
- }
- memmap_map[memmap_nr].addr = addr;
- memmap_map[memmap_nr].size = size;
- memmap_nr++;
-}
-
-static int __init setup_memmap(char *p)
-{
- char *oldp;
- u64 start_at, mem_size;
-
- if (!p)
- return -EINVAL;
-
- if (!strncmp(p, "exactmap", 8)) {
- pr_err("\"memmap=exactmap\" not valid on tile\n");
- return 0;
- }
-
- oldp = p;
- mem_size = memparse(p, &p);
- if (p == oldp)
- return -EINVAL;
-
- if (*p == '@') {
- pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
- } else if (*p == '#') {
- pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
- } else if (*p == '$') {
- start_at = memparse(p+1, &p);
- add_memmap_region(start_at, mem_size);
- } else {
- if (mem_size == 0)
- return -EINVAL;
- maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
- (HPAGE_SHIFT - PAGE_SHIFT);
- }
- return *p == '\0' ? 0 : -EINVAL;
-}
-early_param("memmap", setup_memmap);
-
-static int __init setup_mem(char *str)
-{
- return setup_maxmem(str);
-}
-early_param("mem", setup_mem); /* compatibility with x86 */
-
-static int __init setup_isolnodes(char *str)
-{
- if (str == NULL || nodelist_parse(str, isolnodes) != 0)
- return -EINVAL;
-
- pr_info("Set isolnodes value to '%*pbl'\n",
- nodemask_pr_args(&isolnodes));
- return 0;
-}
-early_param("isolnodes", setup_isolnodes);
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
-static int __init setup_pci_reserve(char* str)
-{
- if (str == NULL || kstrtouint(str, 0, &pci_reserve_mb) != 0 ||
- pci_reserve_mb > 3 * 1024)
- return -EINVAL;
-
- pr_info("Reserving %dMB for PCIE root complex mappings\n",
- pci_reserve_mb);
- return 0;
-}
-early_param("pci_reserve", setup_pci_reserve);
-#endif
-
-#ifndef __tilegx__
-/*
- * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
- * This can be used to increase (or decrease) the vmalloc area.
- */
-static int __init parse_vmalloc(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;
-
- /* See validate_va() for more on this test. */
- if ((long)_VMALLOC_START >= 0)
- early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
- VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);
-
- return 0;
-}
-early_param("vmalloc", parse_vmalloc);
-#endif
-
-#ifdef CONFIG_HIGHMEM
-/*
- * Determine for each controller where its lowmem is mapped and how much of
- * it is mapped there. On controller zero, the first few megabytes are
- * already mapped in as code at MEM_SV_START, so in principle we could
- * start our data mappings higher up, but for now we don't bother, to avoid
- * additional confusion.
- *
- * One question is whether, on systems with more than 768 MB and
- * controllers of different sizes, to map in a proportionate amount of
- * each one, or to try to map the same amount from each controller.
- * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
- * respectively, do we map 256MB from each, or do we map 128 MB, 512
- * MB, and 128 MB respectively?) For now we use a proportionate
- * solution like the latter.
- *
- * The VA/PA mapping demands that we align our decisions at 16 MB
- * boundaries so that we can rapidly convert VA to PA.
- */
-static void *__init setup_pa_va_mapping(void)
-{
- unsigned long curr_pages = 0;
- unsigned long vaddr = PAGE_OFFSET;
- nodemask_t highonlynodes = isolnodes;
- int i, j;
-
- memset(pbase_map, -1, sizeof(pbase_map));
- memset(vbase_map, -1, sizeof(vbase_map));
-
- /* Node zero cannot be isolated for LOWMEM purposes. */
- node_clear(0, highonlynodes);
-
- /* Count up the number of pages on non-highonlynodes controllers. */
- mappable_physpages = 0;
- for_each_online_node(i) {
- if (!node_isset(i, highonlynodes))
- mappable_physpages +=
- node_end_pfn[i] - node_start_pfn[i];
- }
-
- for_each_online_node(i) {
- unsigned long start = node_start_pfn[i];
- unsigned long end = node_end_pfn[i];
- unsigned long size = end - start;
- unsigned long vaddr_end;
-
- if (node_isset(i, highonlynodes)) {
- /* Mark this controller as having no lowmem. */
- node_lowmem_end_pfn[i] = start;
- continue;
- }
-
- curr_pages += size;
- if (mappable_physpages > MAXMEM_PFN) {
- vaddr_end = PAGE_OFFSET +
- (((u64)curr_pages * MAXMEM_PFN /
- mappable_physpages)
- << PAGE_SHIFT);
- } else {
- vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
- }
- for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
- unsigned long this_pfn =
- start + (j << HUGETLB_PAGE_ORDER);
- pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
- if (vbase_map[__pfn_to_highbits(this_pfn)] ==
- (void *)-1)
- vbase_map[__pfn_to_highbits(this_pfn)] =
- (void *)(vaddr & HPAGE_MASK);
- }
- node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
- BUG_ON(node_lowmem_end_pfn[i] > end);
- }
-
- /* Return highest address of any mapped memory. */
- return (void *)vaddr;
-}
-#endif /* CONFIG_HIGHMEM */
-
-/*
- * Register our most important memory mappings with the debug stub.
- *
- * This is up to 4 mappings for lowmem, one mapping per memory
- * controller, plus one for our text segment.
- */
-static void store_permanent_mappings(void)
-{
- int i;
-
- for_each_online_node(i) {
- HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
-#ifdef CONFIG_HIGHMEM
- HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
-#else
- HV_PhysAddr high_mapped_pa = node_end_pfn[i];
-#endif
-
- unsigned long pages = high_mapped_pa - node_start_pfn[i];
- HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
- hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
- }
-
- hv_store_mapping((HV_VirtAddr)_text,
- (uint32_t)(_einittext - _text), 0);
-}
-
-/*
- * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
- * and node_online_map, doing suitable sanity-checking.
- * Also set min_low_pfn, max_low_pfn, and max_pfn.
- */
-static void __init setup_memory(void)
-{
- int i, j;
- int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
-#ifdef CONFIG_HIGHMEM
- long highmem_pages;
-#endif
-#ifndef __tilegx__
- int cap;
-#endif
-#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
- long lowmem_pages;
-#endif
- unsigned long physpages = 0;
-
- /* We are using a char to hold the cpu_2_node[] mapping */
- BUILD_BUG_ON(MAX_NUMNODES > 127);
-
- /* Discover the ranges of memory available to us */
- for (i = 0; ; ++i) {
- unsigned long start, size, end, highbits;
- HV_PhysAddrRange range = hv_inquire_physical(i);
- if (range.size == 0)
- break;
-#ifdef CONFIG_FLATMEM
- if (i > 0) {
- pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
- range.start, range.start + range.size);
- continue;
- }
-#endif
-#ifndef __tilegx__
- if ((unsigned long)range.start) {
- pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
- range.start, range.start + range.size);
- continue;
- }
-#endif
- if ((range.start & (HPAGE_SIZE-1)) != 0 ||
- (range.size & (HPAGE_SIZE-1)) != 0) {
- unsigned long long start_pa = range.start;
- unsigned long long orig_size = range.size;
- range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
- range.size -= (range.start - start_pa);
- range.size &= HPAGE_MASK;
- pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n",
- start_pa, start_pa + orig_size,
- range.start, range.start + range.size);
- }
- highbits = __pa_to_highbits(range.start);
- if (highbits >= NR_PA_HIGHBIT_VALUES) {
- pr_err("PA high bits too high: %#llx..%#llx\n",
- range.start, range.start + range.size);
- continue;
- }
- if (highbits_seen[highbits]) {
- pr_err("Range overlaps in high bits: %#llx..%#llx\n",
- range.start, range.start + range.size);
- continue;
- }
- highbits_seen[highbits] = 1;
- if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
- int max_size = maxnodemem_pfn[i];
- if (max_size > 0) {
- pr_err("Maxnodemem reduced node %d to %d pages\n",
- i, max_size);
- range.size = PFN_PHYS(max_size);
- } else {
- pr_err("Maxnodemem disabled node %d\n", i);
- continue;
- }
- }
- if (physpages + PFN_DOWN(range.size) > maxmem_pfn) {
- int max_size = maxmem_pfn - physpages;
- if (max_size > 0) {
- pr_err("Maxmem reduced node %d to %d pages\n",
- i, max_size);
- range.size = PFN_PHYS(max_size);
- } else {
- pr_err("Maxmem disabled node %d\n", i);
- continue;
- }
- }
- if (i >= MAX_NUMNODES) {
- pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
- i, range.size, range.size + range.start);
- continue;
- }
-
- start = range.start >> PAGE_SHIFT;
- size = range.size >> PAGE_SHIFT;
- end = start + size;
-
-#ifndef __tilegx__
- if (((HV_PhysAddr)end << PAGE_SHIFT) !=
- (range.start + range.size)) {
- pr_err("PAs too high to represent: %#llx..%#llx\n",
- range.start, range.start + range.size);
- continue;
- }
-#endif
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
- /*
- * Blocks that overlap the pci reserved region must
- * have enough space to hold the maximum percpu data
- * region at the top of the range. If there isn't
- * enough space above the reserved region, just
- * truncate the node.
- */
- if (start <= pci_reserve_start_pfn &&
- end > pci_reserve_start_pfn) {
- unsigned int per_cpu_size =
- __per_cpu_end - __per_cpu_start;
- unsigned int percpu_pages =
- NR_CPUS * PFN_UP(per_cpu_size);
- if (end < pci_reserve_end_pfn + percpu_pages) {
- end = pci_reserve_start_pfn;
- pr_err("PCI mapping region reduced node %d to %ld pages\n",
- i, end - start);
- }
- }
-#endif
-
- for (j = __pfn_to_highbits(start);
- j <= __pfn_to_highbits(end - 1); j++)
- highbits_to_node[j] = i;
-
- node_start_pfn[i] = start;
- node_end_pfn[i] = end;
- node_controller[i] = range.controller;
- physpages += size;
- max_pfn = end;
-
- /* Mark node as online */
- node_set(i, node_online_map);
- node_set(i, node_possible_map);
- }
-
-#ifndef __tilegx__
- /*
- * For 4KB pages, mem_map "struct page" data is 1% of the size
- * of the physical memory, so can be quite big (640 MB for
- * four 16G zones). These structures must be mapped in
- * lowmem, and since we currently cap out at about 768 MB,
- * it's impractical to try to use this much address space.
- * For now, arbitrarily cap the amount of physical memory
- * we're willing to use at 8 million pages (32GB of 4KB pages).
- */
- cap = 8 * 1024 * 1024; /* 8 million pages */
- if (physpages > cap) {
- int num_nodes = num_online_nodes();
- int cap_each = cap / num_nodes;
- unsigned long dropped_pages = 0;
- for (i = 0; i < num_nodes; ++i) {
- int size = node_end_pfn[i] - node_start_pfn[i];
- if (size > cap_each) {
- dropped_pages += (size - cap_each);
- node_end_pfn[i] = node_start_pfn[i] + cap_each;
- }
- }
- physpages -= dropped_pages;
- pr_warn("Only using %ldMB memory - ignoring %ldMB\n",
- physpages >> (20 - PAGE_SHIFT),
- dropped_pages >> (20 - PAGE_SHIFT));
- pr_warn("Consider using a larger page size\n");
- }
-#endif
-
- /* Heap starts just above the last loaded address. */
- min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);
-
-#ifdef CONFIG_HIGHMEM
- /* Find where we map lowmem from each controller. */
- high_memory = setup_pa_va_mapping();
-
- /* Set max_low_pfn based on what node 0 can directly address. */
- max_low_pfn = node_lowmem_end_pfn[0];
-
- lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
- MAXMEM_PFN : mappable_physpages;
- highmem_pages = (long) (physpages - lowmem_pages);
-
- pr_notice("%ldMB HIGHMEM available\n",
- pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
- pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages));
-#else
- /* Set max_low_pfn based on what node 0 can directly address. */
- max_low_pfn = node_end_pfn[0];
-
-#ifndef __tilegx__
- if (node_end_pfn[0] > MAXMEM_PFN) {
- pr_warn("Only using %ldMB LOWMEM\n", MAXMEM >> 20);
- pr_warn("Use a HIGHMEM enabled kernel\n");
- max_low_pfn = MAXMEM_PFN;
- max_pfn = MAXMEM_PFN;
- node_end_pfn[0] = MAXMEM_PFN;
- } else {
- pr_notice("%ldMB memory available\n",
- pages_to_mb(node_end_pfn[0]));
- }
- for (i = 1; i < MAX_NUMNODES; ++i) {
- node_start_pfn[i] = 0;
- node_end_pfn[i] = 0;
- }
- high_memory = __va(node_end_pfn[0]);
-#else
- lowmem_pages = 0;
- for (i = 0; i < MAX_NUMNODES; ++i) {
- int pages = node_end_pfn[i] - node_start_pfn[i];
- lowmem_pages += pages;
- if (pages)
- high_memory = pfn_to_kaddr(node_end_pfn[i]);
- }
- pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages));
-#endif
-#endif
-}
-
-/*
- * On 32-bit machines, we only put bootmem on the low controller,
- * since PAs > 4GB can't be used in bootmem. In principle one could
- * imagine, e.g., multiple 1 GB controllers all of which could support
- * bootmem, but in practice using controllers this small isn't a
- * particularly interesting scenario, so we just keep it simple and
- * use only the first controller for bootmem on 32-bit machines.
- */
-static inline int node_has_bootmem(int nid)
-{
-#ifdef CONFIG_64BIT
- return 1;
-#else
- return nid == 0;
-#endif
-}
-
-static inline unsigned long alloc_bootmem_pfn(int nid,
- unsigned long size,
- unsigned long goal)
-{
- void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
- PAGE_SIZE, goal);
- unsigned long pfn = kaddr_to_pfn(kva);
- BUG_ON(goal && PFN_PHYS(pfn) != goal);
- return pfn;
-}
-
-static void __init setup_bootmem_allocator_node(int i)
-{
- unsigned long start, end, mapsize, mapstart;
-
- if (node_has_bootmem(i)) {
- NODE_DATA(i)->bdata = &bootmem_node_data[i];
- } else {
- /* Share controller zero's bdata for now. */
- NODE_DATA(i)->bdata = &bootmem_node_data[0];
- return;
- }
-
- /* Skip up to after the bss in node 0. */
- start = (i == 0) ? min_low_pfn : node_start_pfn[i];
-
- /* Only lowmem, if we're a HIGHMEM build. */
-#ifdef CONFIG_HIGHMEM
- end = node_lowmem_end_pfn[i];
-#else
- end = node_end_pfn[i];
-#endif
-
- /* No memory here. */
- if (end == start)
- return;
-
- /* Figure out where the bootmem bitmap is located. */
- mapsize = bootmem_bootmap_pages(end - start);
- if (i == 0) {
- /* Use some space right before the heap on node 0. */
- mapstart = start;
- start += mapsize;
- } else {
- /* Allocate bitmap on node 0 to avoid page table issues. */
- mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
- }
-
- /* Initialize a node. */
- init_bootmem_node(NODE_DATA(i), mapstart, start, end);
-
- /* Free all the space back into the allocator. */
- free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
- /*
- * Throw away any memory aliased by the PCI region.
- */
- if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
- start = max(pci_reserve_start_pfn, start);
- end = min(pci_reserve_end_pfn, end);
- reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
- BOOTMEM_EXCLUSIVE);
- }
-#endif
-}
-
-static void __init setup_bootmem_allocator(void)
-{
- int i;
- for (i = 0; i < MAX_NUMNODES; ++i)
- setup_bootmem_allocator_node(i);
-
- /* Reserve any memory excluded by "memmap" arguments. */
- for (i = 0; i < memmap_nr; ++i) {
- struct memmap_entry *m = &memmap_map[i];
- reserve_bootmem(m->addr, m->size, BOOTMEM_DEFAULT);
- }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start) {
- /* Make sure the initrd memory region is not modified. */
- if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
- BOOTMEM_EXCLUSIVE)) {
- pr_crit("The initrd memory region has been polluted. Disabling it.\n");
- initrd_start = 0;
- initrd_end = 0;
- } else {
- /*
- * Translate initrd_start & initrd_end from PA to VA for
- * future access.
- */
- initrd_start += PAGE_OFFSET;
- initrd_end += PAGE_OFFSET;
- }
- }
-#endif
-
-#ifdef CONFIG_KEXEC
- if (crashk_res.start != crashk_res.end)
- reserve_bootmem(crashk_res.start, resource_size(&crashk_res),
- BOOTMEM_DEFAULT);
-#endif
-}
-
-void *__init alloc_remap(int nid, unsigned long size)
-{
- int pages = node_end_pfn[nid] - node_start_pfn[nid];
- void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
- BUG_ON(size != pages * sizeof(struct page));
- memset(map, 0, size);
- return map;
-}
-
-static int __init percpu_size(void)
-{
- int size = __per_cpu_end - __per_cpu_start;
- size += PERCPU_MODULE_RESERVE;
- size += PERCPU_DYNAMIC_EARLY_SIZE;
- if (size < PCPU_MIN_UNIT_SIZE)
- size = PCPU_MIN_UNIT_SIZE;
- size = roundup(size, PAGE_SIZE);
-
- /* In several places we assume the per-cpu data fits on a huge page. */
- BUG_ON(kdata_huge && size > HPAGE_SIZE);
- return size;
-}
-
-static void __init zone_sizes_init(void)
-{
- unsigned long zones_size[MAX_NR_ZONES] = { 0 };
- int size = percpu_size();
- int num_cpus = smp_height * smp_width;
- const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT));
-
- int i;
-
- for (i = 0; i < num_cpus; ++i)
- node_percpu[cpu_to_node(i)] += size;
-
- for_each_online_node(i) {
- unsigned long start = node_start_pfn[i];
- unsigned long end = node_end_pfn[i];
-#ifdef CONFIG_HIGHMEM
- unsigned long lowmem_end = node_lowmem_end_pfn[i];
-#else
- unsigned long lowmem_end = end;
-#endif
- int memmap_size = (end - start) * sizeof(struct page);
- node_free_pfn[i] = start;
-
- /*
- * Set aside pages for per-cpu data and the mem_map array.
- *
- * Since the per-cpu data requires special homecaching,
- * if we are in kdata_huge mode, we put it at the end of
- * the lowmem region. If we're not in kdata_huge mode,
- * we take the per-cpu pages from the bottom of the
- * controller, since that avoids fragmenting a huge page
- * that users might want. We always take the memmap
- * from the bottom of the controller, since with
- * kdata_huge that lets it be under a huge TLB entry.
- *
- * If the user has requested isolnodes for a controller,
- * though, there'll be no lowmem, so we just alloc_bootmem
- * the memmap. There will be no percpu memory either.
- */
- if (i != 0 && node_isset(i, isolnodes)) {
- node_memmap_pfn[i] =
- alloc_bootmem_pfn(0, memmap_size, 0);
- BUG_ON(node_percpu[i] != 0);
- } else if (node_has_bootmem(i)) {
- unsigned long goal = 0;
- node_memmap_pfn[i] =
- alloc_bootmem_pfn(i, memmap_size, 0);
- if (kdata_huge)
- goal = PFN_PHYS(lowmem_end) - node_percpu[i];
- if (node_percpu[i])
- node_percpu_pfn[i] =
- alloc_bootmem_pfn(i, node_percpu[i],
- goal);
- } else {
- /* In non-bootmem zones, just reserve some pages. */
- node_memmap_pfn[i] = node_free_pfn[i];
- node_free_pfn[i] += PFN_UP(memmap_size);
- if (!kdata_huge) {
- node_percpu_pfn[i] = node_free_pfn[i];
- node_free_pfn[i] += PFN_UP(node_percpu[i]);
- } else {
- node_percpu_pfn[i] =
- lowmem_end - PFN_UP(node_percpu[i]);
- }
- }
-
-#ifdef CONFIG_HIGHMEM
- if (start > lowmem_end) {
- zones_size[ZONE_NORMAL] = 0;
- zones_size[ZONE_HIGHMEM] = end - start;
- } else {
- zones_size[ZONE_NORMAL] = lowmem_end - start;
- zones_size[ZONE_HIGHMEM] = end - lowmem_end;
- }
-#else
- zones_size[ZONE_NORMAL] = end - start;
-#endif
-
- if (start < dma_end) {
- zones_size[ZONE_DMA32] = min(zones_size[ZONE_NORMAL],
- dma_end - start);
- zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA32];
- } else {
- zones_size[ZONE_DMA32] = 0;
- }
-
- /* Take zone metadata from controller 0 if we're isolnode. */
- if (node_isset(i, isolnodes))
- NODE_DATA(i)->bdata = &bootmem_node_data[0];
-
- free_area_init_node(i, zones_size, start, NULL);
- printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
- PFN_UP(node_percpu[i]));
-
- /* Track the type of memory on each node */
- if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA32])
- node_set_state(i, N_NORMAL_MEMORY);
-#ifdef CONFIG_HIGHMEM
- if (end != start)
- node_set_state(i, N_HIGH_MEMORY);
-#endif
-
- node_set_online(i);
- }
-}
-
-#ifdef CONFIG_NUMA
-
-/* which logical CPUs are on which nodes */
-struct cpumask node_2_cpu_mask[MAX_NUMNODES] __ro_after_init;
-EXPORT_SYMBOL(node_2_cpu_mask);
-
-/* which node each logical CPU is on */
-char cpu_2_node[NR_CPUS] __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
-EXPORT_SYMBOL(cpu_2_node);
-
-/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
-static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus)
-{
- if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
- return -1;
- else
- return cpu_to_node(cpu);
-}
-
-/* Return number of immediately-adjacent tiles sharing the same NUMA node. */
-static int __init node_neighbors(int node, int cpu,
- struct cpumask *unbound_cpus)
-{
- int neighbors = 0;
- int w = smp_width;
- int h = smp_height;
- int x = cpu % w;
- int y = cpu / w;
- if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
- ++neighbors;
- if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
- ++neighbors;
- if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
- ++neighbors;
- if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
- ++neighbors;
- return neighbors;
-}
-
-static void __init setup_numa_mapping(void)
-{
- u8 distance[MAX_NUMNODES][NR_CPUS];
- HV_Coord coord;
- int cpu, node, cpus, i, x, y;
- int num_nodes = num_online_nodes();
- struct cpumask unbound_cpus;
- nodemask_t default_nodes;
-
- cpumask_clear(&unbound_cpus);
-
- /* Get set of nodes we will use for defaults */
- nodes_andnot(default_nodes, node_online_map, isolnodes);
- if (nodes_empty(default_nodes)) {
- BUG_ON(!node_isset(0, node_online_map));
- pr_err("Forcing NUMA node zero available as a default node\n");
- node_set(0, default_nodes);
- }
-
- /* Populate the distance[] array */
- memset(distance, -1, sizeof(distance));
- cpu = 0;
- for (coord.y = 0; coord.y < smp_height; ++coord.y) {
- for (coord.x = 0; coord.x < smp_width;
- ++coord.x, ++cpu) {
- BUG_ON(cpu >= nr_cpu_ids);
- if (!cpu_possible(cpu)) {
- cpu_2_node[cpu] = -1;
- continue;
- }
- for_each_node_mask(node, default_nodes) {
- HV_MemoryControllerInfo info =
- hv_inquire_memory_controller(
- coord, node_controller[node]);
- distance[node][cpu] =
- ABS(info.coord.x) + ABS(info.coord.y);
- }
- cpumask_set_cpu(cpu, &unbound_cpus);
- }
- }
- cpus = cpu;
-
- /*
- * Round-robin through the NUMA nodes until all the cpus are
- * assigned. We could be more clever here (e.g. create four
- * sorted linked lists on the same set of cpu nodes, and pull
- * off them in round-robin sequence, removing from all four
- * lists each time) but given the relatively small numbers
- * involved, O(n^2) seems OK for a one-time cost.
- */
- node = first_node(default_nodes);
- while (!cpumask_empty(&unbound_cpus)) {
- int best_cpu = -1;
- int best_distance = INT_MAX;
- for (cpu = 0; cpu < cpus; ++cpu) {
- if (cpumask_test_cpu(cpu, &unbound_cpus)) {
- /*
- * Compute metric, which is how much
- * closer the cpu is to this memory
- * controller than the others, shifted
- * up, and then the number of
- * neighbors already in the node as an
- * epsilon adjustment to try to keep
- * the nodes compact.
- */
- int d = distance[node][cpu] * num_nodes;
- for_each_node_mask(i, default_nodes) {
- if (i != node)
- d -= distance[i][cpu];
- }
- d *= 8; /* allow space for epsilon */
- d -= node_neighbors(node, cpu, &unbound_cpus);
- if (d < best_distance) {
- best_cpu = cpu;
- best_distance = d;
- }
- }
- }
- BUG_ON(best_cpu < 0);
- cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
- cpu_2_node[best_cpu] = node;
- cpumask_clear_cpu(best_cpu, &unbound_cpus);
- node = next_node_in(node, default_nodes);
- }
-
- /* Print out node assignments and set defaults for disabled cpus */
- cpu = 0;
- for (y = 0; y < smp_height; ++y) {
- printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
- for (x = 0; x < smp_width; ++x, ++cpu) {
- if (cpu_to_node(cpu) < 0) {
- pr_cont(" -");
- cpu_2_node[cpu] = first_node(default_nodes);
- } else {
- pr_cont(" %d", cpu_to_node(cpu));
- }
- }
- pr_cont("\n");
- }
-}
-
-static struct cpu cpu_devices[NR_CPUS];
-
-static int __init topology_init(void)
-{
- int i;
-
- for_each_online_node(i)
- register_one_node(i);
-
- for (i = 0; i < smp_height * smp_width; ++i)
- register_cpu(&cpu_devices[i], i);
-
- return 0;
-}
-
-subsys_initcall(topology_init);
-
-#else /* !CONFIG_NUMA */
-
-#define setup_numa_mapping() do { } while (0)
-
-#endif /* CONFIG_NUMA */
-
-/*
- * Initialize hugepage support on this cpu. We do this on all cores
- * early in boot: before argument parsing for the boot cpu, and after
- * argument parsing but before the init functions run on the secondaries.
- * So the values we set up here in the hypervisor may be overridden on
- * the boot cpu as arguments are parsed.
- */
-static void init_super_pages(void)
-{
-#ifdef CONFIG_HUGETLB_SUPER_PAGES
- int i;
- for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
- hv_set_pte_super_shift(i, huge_shift[i]);
-#endif
-}
-
-/**
- * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
- * @boot: Is this the boot cpu?
- *
- * Called from setup_arch() on the boot cpu, or online_secondary().
- */
-void setup_cpu(int boot)
-{
- /* The boot cpu sets up its permanent mappings much earlier. */
- if (!boot)
- store_permanent_mappings();
-
- /* Allow asynchronous TLB interrupts. */
-#if CHIP_HAS_TILE_DMA()
- arch_local_irq_unmask(INT_DMATLB_MISS);
- arch_local_irq_unmask(INT_DMATLB_ACCESS);
-#endif
-#ifdef __tilegx__
- arch_local_irq_unmask(INT_SINGLE_STEP_K);
-#endif
-
- /*
- * Allow user access to many generic SPRs, like the cycle
- * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
- */
- __insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);
-
-#if CHIP_HAS_SN()
- /* Static network is not restricted. */
- __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
-#endif
-
- /*
- * Set the MPL for interrupt control 0 & 1 to the corresponding
- * values. This includes access to the SYSTEM_SAVE and EX_CONTEXT
- * SPRs, as well as the interrupt mask.
- */
- __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
- __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
-
- /* Initialize IRQ support for this cpu. */
- setup_irq_regs();
-
-#ifdef CONFIG_HARDWALL
- /* Reset the network state on this cpu. */
- reset_network_state();
-#endif
-
- init_super_pages();
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-
-static int __initdata set_initramfs_file;
-static char __initdata initramfs_file[128] = "initramfs";
-
-static int __init setup_initramfs_file(char *str)
-{
- if (str == NULL)
- return -EINVAL;
- strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
- set_initramfs_file = 1;
-
- return 0;
-}
-early_param("initramfs_file", setup_initramfs_file);
-
-/*
- * We look for a file called "initramfs" in the hvfs. If there is one, we
- * allocate some memory for it and it will be unpacked as the initramfs.
- * If it's compressed, the initrd code will uncompress it first.
- */
-static void __init load_hv_initrd(void)
-{
- HV_FS_StatInfo stat;
- int fd, rc;
- void *initrd;
-
- /* If initrd has already been set, skip initramfs file in hvfs. */
- if (initrd_start)
- return;
-
- fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
- if (fd == HV_ENOENT) {
- if (set_initramfs_file) {
- pr_warn("No such hvfs initramfs file '%s'\n",
- initramfs_file);
- return;
- } else {
- /* Try old backwards-compatible name. */
- fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
- if (fd == HV_ENOENT)
- return;
- }
- }
- BUG_ON(fd < 0);
- stat = hv_fs_fstat(fd);
- BUG_ON(stat.size < 0);
- if (stat.flags & HV_FS_ISDIR) {
- pr_warn("Ignoring hvfs file '%s': it's a directory\n",
- initramfs_file);
- return;
- }
- initrd = alloc_bootmem_pages(stat.size);
- rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
- if (rc != stat.size) {
- pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
- stat.size, initramfs_file, rc);
- free_initrd_mem((unsigned long) initrd, stat.size);
- return;
- }
- initrd_start = (unsigned long) initrd;
- initrd_end = initrd_start + stat.size;
-}
-
-void __init free_initrd_mem(unsigned long begin, unsigned long end)
-{
- free_bootmem_late(__pa(begin), end - begin);
-}
-
-static int __init setup_initrd(char *str)
-{
- char *endp;
- unsigned long initrd_size;
-
- initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
- if (initrd_size == 0 || *endp != '@')
- return -EINVAL;
-
- initrd_start = simple_strtoul(endp+1, &endp, 0);
- if (initrd_start == 0)
- return -EINVAL;
-
- initrd_end = initrd_start + initrd_size;
-
- return 0;
-}
-early_param("initrd", setup_initrd);
-
-#else
-static inline void load_hv_initrd(void) {}
-#endif /* CONFIG_BLK_DEV_INITRD */
-
-static void __init validate_hv(void)
-{
- /*
- * It may already be too late, but let's check our built-in
- * configuration against what the hypervisor is providing.
- */
- unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
- int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
- int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
- HV_ASIDRange asid_range;
-
-#ifndef CONFIG_SMP
- HV_Topology topology = hv_inquire_topology();
- BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
- if (topology.width != 1 || topology.height != 1) {
- pr_warn("Warning: booting UP kernel on %dx%d grid; will ignore all but first tile\n",
- topology.width, topology.height);
- }
-#endif
-
- if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
- early_panic("Hypervisor glue size %ld is too big!\n",
- glue_size);
- if (hv_page_size != PAGE_SIZE)
- early_panic("Hypervisor page size %#x != our %#lx\n",
- hv_page_size, PAGE_SIZE);
- if (hv_hpage_size != HPAGE_SIZE)
- early_panic("Hypervisor huge page size %#x != our %#lx\n",
- hv_hpage_size, HPAGE_SIZE);
-
-#ifdef CONFIG_SMP
- /*
- * Some hypervisor APIs take a pointer to a bitmap array
- * whose size is at least the number of cpus on the chip.
- * We use a struct cpumask for this, so it must be big enough.
- */
- if ((smp_height * smp_width) > nr_cpu_ids)
- early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %u\n",
- smp_height, smp_width, nr_cpu_ids);
-#endif
-
- /*
- * Check that we're using allowed ASIDs, and initialize the
- * various asid variables to their appropriate initial states.
- */
- asid_range = hv_inquire_asid(0);
- min_asid = asid_range.start;
- __this_cpu_write(current_asid, min_asid);
- max_asid = asid_range.start + asid_range.size - 1;
-
- if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
- sizeof(chip_model)) < 0) {
- pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
- strlcpy(chip_model, "unknown", sizeof(chip_model));
- }
-}
-
-static void __init validate_va(void)
-{
-#ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */
- /*
- * Similarly, make sure we're only using allowed VAs.
- * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
- * and 0 .. KERNEL_HIGH_VADDR.
- * In addition, make sure we CAN'T use the end of memory, since
- * we use the last chunk of each pgd for the pgd_list.
- */
- int i, user_kernel_ok = 0;
- unsigned long max_va = 0;
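- /* Lowest VA that the pgd slots reserved for the pgd_list would map. */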
- unsigned long list_va =
- ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
-
- for (i = 0; ; ++i) {
- HV_VirtAddrRange range = hv_inquire_virtual(i);
- if (range.size == 0)
- break;
- if (range.start <= MEM_USER_INTRPT &&
- range.start + range.size >= MEM_HV_START)
- user_kernel_ok = 1;
- if (range.start == 0)
- max_va = range.size;
- BUG_ON(range.start + range.size > list_va);
- }
- if (!user_kernel_ok)
- early_panic("Hypervisor not configured for user/kernel VAs\n");
- if (max_va == 0)
- early_panic("Hypervisor not configured for low VAs\n");
- if (max_va < KERNEL_HIGH_VADDR)
- early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
- max_va, KERNEL_HIGH_VADDR);
-
- /* Kernel PCs must have their high bit set; see intvec.S. */
- if ((long)VMALLOC_START >= 0)
- early_panic("Linux VMALLOC region below the 2GB line (%#lx)!\n"
- "Reconfigure the kernel with smaller VMALLOC_RESERVE\n",
- VMALLOC_START);
-#endif
-}
-
-/*
- * cpu_lotar_map lists all the cpus that are valid for the supervisor
- * to cache data on at a page level, i.e. what cpus can be placed in
- * the LOTAR field of a PTE. It is equivalent to the set of possible
- * cpus plus any other cpus that are willing to share their cache.
- * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
- */
-struct cpumask __ro_after_init cpu_lotar_map;
-EXPORT_SYMBOL(cpu_lotar_map);
-
-/*
- * hash_for_home_map lists all the tiles that hash-for-home data
- * will be cached on. Note that this may include tiles that are not
- * valid for this supervisor to use otherwise (e.g. if a hypervisor
- * device is being shared between multiple supervisors).
- * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
- */
-struct cpumask hash_for_home_map;
-EXPORT_SYMBOL(hash_for_home_map);
-
-/*
- * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
- * flush on our behalf. It is set to cpu_possible_mask OR'ed with
- * hash_for_home_map, and it is what should be passed to
- * hv_flush_remote() to flush all caches. Note that if there are
- * dedicated hypervisor driver tiles that have authorized use of their
- * cache, those tiles will only appear in cpu_lotar_map, NOT in
- * cpu_cacheable_map, as they are a special case.
- */
-struct cpumask __ro_after_init cpu_cacheable_map;
-EXPORT_SYMBOL(cpu_cacheable_map);
-
-static __initdata struct cpumask disabled_map;
-
-static int __init disabled_cpus(char *str)
-{
- int boot_cpu = smp_processor_id();
-
- if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
- return -EINVAL;
- if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
- pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
- cpumask_clear_cpu(boot_cpu, &disabled_map);
- }
- return 0;
-}
-
-early_param("disabled_cpus", disabled_cpus);
-
-void __init print_disabled_cpus(void)
-{
- if (!cpumask_empty(&disabled_map))
- pr_info("CPUs not available for Linux: %*pbl\n",
- cpumask_pr_args(&disabled_map));
-}
-
-static void __init setup_cpu_maps(void)
-{
- struct cpumask hv_disabled_map, cpu_possible_init;
- int boot_cpu = smp_processor_id();
- int cpus, i, rc;
-
- /* Learn which cpus are allowed by the hypervisor. */
- rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
- (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
- sizeof(cpu_cacheable_map));
- if (rc < 0)
- early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
- if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
- early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);
-
- /* Compute the cpus disabled by the hvconfig file. */
- cpumask_complement(&hv_disabled_map, &cpu_possible_init);
-
- /* Include them with the cpus disabled by "disabled_cpus". */
- cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);
-
- /*
- * Disable every cpu after "setup_max_cpus". But don't mark cpus
- * outside of our initial rectangle as disabled, since that turns
- * out to be confusing.
- */
- cpus = 1; /* this cpu */
- cpumask_set_cpu(boot_cpu, &disabled_map); /* ignore this cpu */
- for (i = 0; cpus < setup_max_cpus; ++i)
- if (!cpumask_test_cpu(i, &disabled_map))
- ++cpus;
- for (; i < smp_height * smp_width; ++i)
- cpumask_set_cpu(i, &disabled_map);
- cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
- for (i = smp_height * smp_width; i < NR_CPUS; ++i)
- cpumask_clear_cpu(i, &disabled_map);
-
- /*
- * Set up the cpu_possible map as every cpu allocated to us, minus
- * the results of any "disabled_cpus" settings.
- */
- cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
- init_cpu_possible(&cpu_possible_init);
-
- /* Learn which cpus are valid for LOTAR caching. */
- rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
- (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
- sizeof(cpu_lotar_map));
- if (rc < 0) {
- pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
- cpu_lotar_map = *cpu_possible_mask;
- }
-
- /* Retrieve set of CPUs used for hash-for-home caching */
- rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
- (HV_VirtAddr) hash_for_home_map.bits,
- sizeof(hash_for_home_map));
- if (rc < 0)
- early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
- cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
-}
-
-
-static int __init dataplane(char *str)
-{
- pr_warn("WARNING: dataplane support disabled in this kernel\n");
- return 0;
-}
-
-early_param("dataplane", dataplane);
-
-#ifdef CONFIG_NO_HZ_FULL
-/* Warn if hypervisor shared cpus are marked as nohz_full. */
-static int __init check_nohz_full_cpus(void)
-{
- struct cpumask shared;
- int cpu;
-
- if (hv_inquire_tiles(HV_INQ_TILES_SHARED,
- (HV_VirtAddr) shared.bits, sizeof(shared)) < 0) {
- pr_warn("WARNING: No support for inquiring hv shared tiles\n");
- return 0;
- }
- for_each_cpu(cpu, &shared) {
- if (tick_nohz_full_cpu(cpu))
- pr_warn("WARNING: nohz_full cpu %d receives hypervisor interrupts!\n",
- cpu);
- }
- return 0;
-}
-arch_initcall(check_nohz_full_cpus);
-#endif
-
-#ifdef CONFIG_CMDLINE_BOOL
-static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
-#endif
-
-void __init setup_arch(char **cmdline_p)
-{
- int len;
-
-#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
- len = hv_get_command_line((HV_VirtAddr) boot_command_line,
- COMMAND_LINE_SIZE);
- if (boot_command_line[0])
- pr_warn("WARNING: ignoring dynamic command line \"%s\"\n",
- boot_command_line);
- strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
- char *hv_cmdline;
-#if defined(CONFIG_CMDLINE_BOOL)
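- /* Copy in the built-in command line first, then append the hypervisor's. */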
- if (builtin_cmdline[0]) {
- int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
- COMMAND_LINE_SIZE);
- if (builtin_len < COMMAND_LINE_SIZE-1)
- boot_command_line[builtin_len++] = ' ';
- hv_cmdline = &boot_command_line[builtin_len];
- len = COMMAND_LINE_SIZE - builtin_len;
- } else
-#endif
- {
- hv_cmdline = boot_command_line;
- len = COMMAND_LINE_SIZE;
- }
- len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
- if (len < 0 || len > COMMAND_LINE_SIZE)
- early_panic("hv_get_command_line failed: %d\n", len);
-#endif
-
- *cmdline_p = boot_command_line;
-
- /* Set disabled_map and setup_max_cpus very early */
- parse_early_param();
-
- /* Make sure the kernel is compatible with the hypervisor. */
- validate_hv();
- validate_va();
-
- setup_cpu_maps();
-
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
- /*
- * Initialize the PCI structures. This is done before memory
- * setup so that we know whether or not a pci_reserve region
- * is necessary.
- */
- if (tile_pci_init() == 0)
- pci_reserve_mb = 0;
-
- /* PCI systems reserve a region just below 4GB for mapping iomem. */
- pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
- pci_reserve_start_pfn = pci_reserve_end_pfn -
- (pci_reserve_mb << (20 - PAGE_SHIFT));
-#endif
-
- init_mm.start_code = (unsigned long) _text;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) _end;
-
- setup_memory();
- store_permanent_mappings();
- setup_bootmem_allocator();
-
- /*
- * NOTE: before this point _nobody_ is allowed to allocate
- * any memory using the bootmem allocator.
- */
-
-#ifdef CONFIG_SWIOTLB
- swiotlb_init(0);
-#endif
-
- paging_init();
- setup_numa_mapping();
- zone_sizes_init();
- set_page_homes();
- setup_cpu(1);
- setup_clock();
- load_hv_initrd();
-}
-
-
-/*
- * Set up per-cpu memory.
- */
-
-unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
-EXPORT_SYMBOL(__per_cpu_offset);
-
-static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
-static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };
-
-/*
- * As the percpu code allocates pages, we return the pages from the
- * end of the node for the specified cpu.
- */
-static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
-{
- int nid = cpu_to_node(cpu);
- unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];
-
- BUG_ON(size % PAGE_SIZE != 0);
- pfn_offset[nid] += size / PAGE_SIZE;
- BUG_ON(node_percpu[nid] < size);
- node_percpu[nid] -= size;
- if (percpu_pfn[cpu] == 0)
- percpu_pfn[cpu] = pfn;
- return pfn_to_kaddr(pfn);
-}
-
-/*
- * Pages reserved for percpu memory are not freeable, and in any case we are
- * on a short path to panic() in setup_per_cpu_areas() at this point anyway.
- */
-static void __init pcpu_fc_free(void *ptr, size_t size)
-{
-}
-
-/*
- * Set up vmalloc page tables using bootmem for the percpu code.
- */
-static void __init pcpu_fc_populate_pte(unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- BUG_ON(pgd_addr_invalid(addr));
- if (addr < VMALLOC_START || addr >= VMALLOC_END)
- panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n",
- addr, VMALLOC_START, VMALLOC_END);
-
- pgd = swapper_pg_dir + pgd_index(addr);
- pud = pud_offset(pgd, addr);
- BUG_ON(!pud_present(*pud));
- pmd = pmd_offset(pud, addr);
- if (pmd_present(*pmd)) {
- BUG_ON(pmd_huge_page(*pmd));
- } else {
- pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
- HV_PAGE_TABLE_ALIGN, 0);
- pmd_populate_kernel(&init_mm, pmd, pte);
- }
-}
-
-void __init setup_per_cpu_areas(void)
-{
- struct page *pg;
- unsigned long delta, pfn, lowmem_va;
- unsigned long size = percpu_size();
- char *ptr;
- int rc, cpu, i;
-
- rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
- pcpu_fc_free, pcpu_fc_populate_pte);
- if (rc < 0)
- panic("Cannot initialize percpu area (err=%d)", rc);
-
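- /* Offset from the linked percpu section to the base of the allocated chunks. */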
- delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
- for_each_possible_cpu(cpu) {
- __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
-
- /* finv the copy out of cache so we can change homecache */
- ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
- __finv_buffer(ptr, size);
- pfn = percpu_pfn[cpu];
-
- /* Rewrite the page tables to cache on that cpu */
- pg = pfn_to_page(pfn);
- for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
-
- /* Update the vmalloc mapping and page home. */
- unsigned long addr = (unsigned long)ptr + i;
- pte_t *ptep = virt_to_kpte(addr);
- pte_t pte = *ptep;
- BUG_ON(pfn != pte_pfn(pte));
- pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
- pte = set_remote_cache_cpu(pte, cpu);
- set_pte_at(&init_mm, addr, ptep, pte);
-
- /* Update the lowmem mapping for consistency. */
- lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
- ptep = virt_to_kpte(lowmem_va);
- if (pte_huge(*ptep)) {
- printk(KERN_DEBUG "early shatter of huge page at %#lx\n",
- lowmem_va);
- shatter_pmd((pmd_t *)ptep);
- ptep = virt_to_kpte(lowmem_va);
- BUG_ON(pte_huge(*ptep));
- }
- BUG_ON(pfn != pte_pfn(*ptep));
- set_pte_at(&init_mm, lowmem_va, ptep, pte);
- }
- }
-
- /* Set our thread pointer appropriately. */
- set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);
-
- /* Make sure the finv's have completed. */
- mb_incoherent();
-
- /* Flush the TLB so we reference it properly from here on out. */
- local_flush_tlb_all();
-}
-
-static struct resource data_resource = {
- .name = "Kernel data",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
-};
-
-static struct resource code_resource = {
- .name = "Kernel code",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
-};
-
-/*
- * On TILEPro, we reserve all resources above 4GB so that PCI won't try to put
- * mappings above 4GB.
- */
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
-static struct resource* __init
-insert_non_bus_resource(void)
-{
- struct resource *res =
- kzalloc(sizeof(struct resource), GFP_ATOMIC);
- if (!res)
- return NULL;
- res->name = "Non-Bus Physical Address Space";
- res->start = (1ULL << 32);
- res->end = -1LL;
- res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
- if (insert_resource(&iomem_resource, res)) {
- kfree(res);
- return NULL;
- }
- return res;
-}
-#endif
-
-static struct resource* __init
-insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
-{
- struct resource *res =
- kzalloc(sizeof(struct resource), GFP_ATOMIC);
- if (!res)
- return NULL;
- res->start = start_pfn << PAGE_SHIFT;
- res->end = (end_pfn << PAGE_SHIFT) - 1;
- res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
- if (reserved) {
- res->name = "Reserved";
- } else {
- res->name = "System RAM";
- res->flags |= IORESOURCE_SYSRAM;
- }
- if (insert_resource(&iomem_resource, res)) {
- kfree(res);
- return NULL;
- }
- return res;
-}
-
-/*
- * Request address space for all standard resources
- *
- * If the system includes PCI root complex drivers, we need to create
- * a window just below 4GB where PCI BARs can be mapped.
- */
-static int __init request_standard_resources(void)
-{
- int i;
- enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
- insert_non_bus_resource();
-#endif
-
- for_each_online_node(i) {
- u64 start_pfn = node_start_pfn[i];
- u64 end_pfn = node_end_pfn[i];
-
-#if defined(CONFIG_PCI) && !defined(__tilegx__)
- if (start_pfn <= pci_reserve_start_pfn &&
- end_pfn > pci_reserve_start_pfn) {
- if (end_pfn > pci_reserve_end_pfn)
- insert_ram_resource(pci_reserve_end_pfn,
- end_pfn, 0);
- end_pfn = pci_reserve_start_pfn;
- }
-#endif
- insert_ram_resource(start_pfn, end_pfn, 0);
- }
-
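- /* Convert kernel-text VAs at MEM_SV_START to PAGE_OFFSET aliases for __pa(). */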
- code_resource.start = __pa(_text - CODE_DELTA);
- code_resource.end = __pa(_etext - CODE_DELTA)-1;
- data_resource.start = __pa(_sdata);
- data_resource.end = __pa(_end)-1;
-
- insert_resource(&iomem_resource, &code_resource);
- insert_resource(&iomem_resource, &data_resource);
-
- /* Mark any "memmap" regions busy for the resource manager. */
- for (i = 0; i < memmap_nr; ++i) {
- struct memmap_entry *m = &memmap_map[i];
- insert_ram_resource(PFN_DOWN(m->addr),
- PFN_UP(m->addr + m->size - 1), 1);
- }
-
-#ifdef CONFIG_KEXEC
- insert_resource(&iomem_resource, &crashk_res);
-#endif
-
- return 0;
-}
-
-subsys_initcall(request_standard_resources);
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
deleted file mode 100644
index f2bf557bb005..000000000000
--- a/arch/tile/kernel/signal.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * Copyright (C) 1991, 1992 Linus Torvalds
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task_stack.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/personality.h>
-#include <linux/suspend.h>
-#include <linux/ptrace.h>
-#include <linux/elf.h>
-#include <linux/compat.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <asm/processor.h>
-#include <asm/ucontext.h>
-#include <asm/sigframe.h>
-#include <asm/syscalls.h>
-#include <asm/vdso.h>
-#include <arch/interrupts.h>
-
-#define DEBUG_SIG 0
-
-/*
- * Do a signal return; undo the signal stack.
- */
-
-int restore_sigcontext(struct pt_regs *regs,
- struct sigcontext __user *sc)
-{
- int err;
-
- /* Always make any pending restarted system calls return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- /*
- * Enforce that sigcontext is like pt_regs, and doesn't mess
- * up our stack alignment rules.
- */
- BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
- BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
- err = __copy_from_user(regs, sc, sizeof(*regs));
-
- /* Ensure that the PL is always set to USER_PL. */
- regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
-
- regs->faultnum = INT_SWINT_1_SIGRETURN;
-
- return err;
-}
-
-void signal_fault(const char *type, struct pt_regs *regs,
- void __user *frame, int sig)
-{
- trace_unhandled_signal(type, regs, (unsigned long)frame, SIGSEGV);
- force_sigsegv(sig, current);
-}
-
-/* The assembly shim for this function arranges to ignore the return value. */
-SYSCALL_DEFINE0(rt_sigreturn)
-{
- struct pt_regs *regs = current_pt_regs();
- struct rt_sigframe __user *frame =
- (struct rt_sigframe __user *)(regs->sp);
- sigset_t set;
-
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
- goto badframe;
-
- if (restore_altstack(&frame->uc.uc_stack))
- goto badframe;
-
- return 0;
-
-badframe:
- signal_fault("bad sigreturn frame", regs, frame, 0);
- return 0;
-}
-
-/*
- * Set up a signal frame.
- */
-
-int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
-{
- return __copy_to_user(sc, regs, sizeof(*regs));
-}
-
-/*
- * Determine which stack to use.
- */
-static inline void __user *get_sigframe(struct k_sigaction *ka,
- struct pt_regs *regs,
- size_t frame_size)
-{
- unsigned long sp;
-
- /* Default to using normal stack */
- sp = regs->sp;
-
- /*
- * If we are on the alternate signal stack and would overflow
- * it, don't. Return an always-bogus address instead so we
- * will die with SIGSEGV.
- */
- if (on_sig_stack(sp) && unlikely(!on_sig_stack(sp - frame_size)))
- return (void __user __force *)-1UL;
-
- /* This is the X/Open sanctioned signal stack switching. */
- if (ka->sa.sa_flags & SA_ONSTACK) {
- if (sas_ss_flags(sp) == 0)
- sp = current->sas_ss_sp + current->sas_ss_size;
- }
-
- sp -= frame_size;
- /*
- * Align the stack pointer according to the TILE ABI,
- * i.e. so that on function entry (sp & 15) == 0.
- */
- sp &= -16UL;
- return (void __user *) sp;
-}
-
-static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs)
-{
- unsigned long restorer;
- struct rt_sigframe __user *frame;
- int err = 0, sig = ksig->sig;
-
- frame = get_sigframe(&ksig->ka, regs, sizeof(*frame));
-
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- goto err;
-
- /* Always write at least the signal number for the stack backtracer. */
- if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
- /* At sigreturn time, restore the callee-save registers too. */
- err |= copy_siginfo_to_user(&frame->info, &ksig->info);
- regs->flags |= PT_FLAGS_RESTORE_REGS;
- } else {
- err |= __put_user(ksig->info.si_signo, &frame->info.si_signo);
- }
-
- /* Create the ucontext. */
- err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(NULL, &frame->uc.uc_link);
- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- if (err)
- goto err;
-
- restorer = VDSO_SYM(&__vdso_rt_sigreturn);
- if (ksig->ka.sa.sa_flags & SA_RESTORER)
- restorer = (unsigned long) ksig->ka.sa.sa_restorer;
-
- /*
- * Set up registers for signal handler.
- * Registers that we don't modify keep the value they had from
- * user-space at the time we took the signal.
- * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
- * since some things rely on this (e.g. glibc's debug/segfault.c).
- */
- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
- regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
- regs->sp = (unsigned long) frame;
- regs->lr = restorer;
- regs->regs[0] = (unsigned long) sig;
- regs->regs[1] = (unsigned long) &frame->info;
- regs->regs[2] = (unsigned long) &frame->uc;
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- return 0;
-
-err:
- trace_unhandled_signal("bad sigreturn frame", regs,
- (unsigned long)frame, SIGSEGV);
- return -EFAULT;
-}
-
-/*
- * OK, we're invoking a handler
- */
-
-static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
-{
- sigset_t *oldset = sigmask_to_save();
- int ret;
-
- /* Are we from a system call? */
- if (regs->faultnum == INT_SWINT_1) {
- /* If so, check system call restarting.. */
- switch (regs->regs[0]) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- regs->regs[0] = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
- regs->regs[0] = -EINTR;
- break;
- }
- /* fallthrough */
- case -ERESTARTNOINTR:
- /* Reload caller-saves to restore r0..r5 and r10. */
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- regs->regs[0] = regs->orig_r0;
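- /* Back up one 8-byte bundle so the swint1 re-executes. */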
- regs->pc -= 8;
- }
- }
-
- /* Set up the stack frame */
-#ifdef CONFIG_COMPAT
- if (is_compat_task())
- ret = compat_setup_rt_frame(ksig, oldset, regs);
- else
-#endif
- ret = setup_rt_frame(ksig, oldset, regs);
-
- signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-void do_signal(struct pt_regs *regs)
-{
- struct ksignal ksig;
-
- /*
- * i386 will check if we're coming from kernel mode and bail out
- * here. In my experience this just turns weird crashes into
- * weird spin-hangs. But if we find a case where this seems
- * helpful, we can reinstate the check on "!user_mode(regs)".
- */
-
- if (get_signal(&ksig)) {
- /* Whee! Actually deliver the signal. */
- handle_signal(&ksig, regs);
- goto done;
- }
-
- /* Did we come from a system call? */
- if (regs->faultnum == INT_SWINT_1) {
- /* Restart the system call - no handlers present */
- switch (regs->regs[0]) {
- case -ERESTARTNOHAND:
- case -ERESTARTSYS:
- case -ERESTARTNOINTR:
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- regs->regs[0] = regs->orig_r0;
- regs->pc -= 8;
- break;
-
- case -ERESTART_RESTARTBLOCK:
- regs->flags |= PT_FLAGS_CALLER_SAVES;
- regs->regs[TREG_SYSCALL_NR] = __NR_restart_syscall;
- regs->pc -= 8;
- break;
- }
- }
-
- /* If there's no signal to deliver, just put the saved sigmask back. */
- restore_saved_sigmask();
-
-done:
- /* Avoid double syscall restart if there are nested signals. */
- regs->faultnum = INT_SWINT_1_SIGRETURN;
-}
-
-int show_unhandled_signals = 1;
-
-static int __init crashinfo(char *str)
-{
- const char *word;
-
- if (*str == '\0')
- show_unhandled_signals = 2;
- else if (*str != '=' || kstrtoint(++str, 0, &show_unhandled_signals) != 0)
- return 0;
-
- switch (show_unhandled_signals) {
- case 0:
- word = "No";
- break;
- case 1:
- word = "One-line";
- break;
- default:
- word = "Detailed";
- break;
- }
- pr_info("%s crash reports will be generated on the console\n", word);
- return 1;
-}
-__setup("crashinfo", crashinfo);
-
-static void dump_mem(void __user *address)
-{
- void __user *addr;
- enum { region_size = 256, bytes_per_line = 16 };
- int i, j, k;
- int found_readable_mem = 0;
-
- if (!access_ok(VERIFY_READ, address, 1)) {
- pr_err("Not dumping at address 0x%lx (kernel address)\n",
- (unsigned long)address);
- return;
- }
-
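- /* Center the dump region on the address; if that underflows, start at NULL. */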
- addr = (void __user *)
- (((unsigned long)address & -bytes_per_line) - region_size/2);
- if (addr > address)
- addr = NULL;
- for (i = 0; i < region_size;
- addr += bytes_per_line, i += bytes_per_line) {
- unsigned char buf[bytes_per_line];
- char line[100];
- if (copy_from_user(buf, addr, bytes_per_line))
- continue;
- if (!found_readable_mem) {
- pr_err("Dumping memory around address 0x%lx:\n",
- (unsigned long)address);
- found_readable_mem = 1;
- }
- j = sprintf(line, REGFMT ":", (unsigned long)addr);
- for (k = 0; k < bytes_per_line; ++k)
- j += sprintf(&line[j], " %02x", buf[k]);
- pr_err("%s\n", line);
- }
- if (!found_readable_mem)
- pr_err("No readable memory around address 0x%lx\n",
- (unsigned long)address);
-}
-
-void trace_unhandled_signal(const char *type, struct pt_regs *regs,
- unsigned long address, int sig)
-{
- struct task_struct *tsk = current;
-
- if (show_unhandled_signals == 0)
- return;
-
- /* If the signal is handled, don't show it here. */
- if (!is_global_init(tsk)) {
- void __user *handler =
- tsk->sighand->action[sig-1].sa.sa_handler;
- if (handler != SIG_IGN && handler != SIG_DFL)
- return;
- }
-
- /* Rate-limit the one-line output, not the detailed output. */
- if (show_unhandled_signals <= 1 && !printk_ratelimit())
- return;
-
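- /* Crashes of init (pid 1) are emergencies; everything else is just info. */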
- printk("%s%s[%d]: %s at %lx pc "REGFMT" signal %d",
- task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
- tsk->comm, task_pid_nr(tsk), type, address, regs->pc, sig);
-
- print_vma_addr(KERN_CONT " in ", regs->pc);
-
- printk(KERN_CONT "\n");
-
- if (show_unhandled_signals > 1) {
- switch (sig) {
- case SIGILL:
- case SIGFPE:
- case SIGSEGV:
- case SIGBUS:
- pr_err("User crash: signal %d, trap %ld, address 0x%lx\n",
- sig, regs->faultnum, address);
- show_regs(regs);
- dump_mem((void __user *)address);
- break;
- default:
- pr_err("User crash: signal %d, trap %ld\n",
- sig, regs->faultnum);
- break;
- }
- }
-}
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
deleted file mode 100644
index 479d8033a801..000000000000
--- a/arch/tile/kernel/single_step.c
+++ /dev/null
@@ -1,786 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * A code-rewriter that enables instruction single-stepping.
- */
-
-#include <linux/smp.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/thread_info.h>
-#include <linux/uaccess.h>
-#include <linux/mman.h>
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/prctl.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-#include <asm/unaligned.h>
-#include <arch/abi.h>
-#include <arch/spr_def.h>
-#include <arch/opcode.h>
-
-
-#ifndef __tilegx__ /* Hardware support for single step unavailable. */
-
-#define signExtend17(val) sign_extend((val), 17)
-#define TILE_X1_MASK (0xffffffffULL << 31)
-
-enum mem_op {
- MEMOP_NONE,
- MEMOP_LOAD,
- MEMOP_STORE,
- MEMOP_LOAD_POSTINCR,
- MEMOP_STORE_POSTINCR
-};
-
-static inline tilepro_bundle_bits set_BrOff_X1(tilepro_bundle_bits n,
- s32 offset)
-{
- tilepro_bundle_bits result;
-
- /* mask out the old offset */
- tilepro_bundle_bits mask = create_BrOff_X1(-1);
- result = n & (~mask);
-
- /* or in the new offset */
- result |= create_BrOff_X1(offset);
-
- return result;
-}
-
-static inline tilepro_bundle_bits move_X1(tilepro_bundle_bits n, int dest,
- int src)
-{
- tilepro_bundle_bits result;
- tilepro_bundle_bits op;
-
- result = n & (~TILE_X1_MASK);
-
- op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
- create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
- create_Dest_X1(dest) |
- create_SrcB_X1(TREG_ZERO) |
- create_SrcA_X1(src) ;
-
- result |= op;
- return result;
-}
-
-static inline tilepro_bundle_bits nop_X1(tilepro_bundle_bits n)
-{
- return move_X1(n, TREG_ZERO, TREG_ZERO);
-}
-
-static inline tilepro_bundle_bits addi_X1(
- tilepro_bundle_bits n, int dest, int src, int imm)
-{
- n &= ~TILE_X1_MASK;
-
- n |= (create_SrcA_X1(src) |
- create_Dest_X1(dest) |
- create_Imm8_X1(imm) |
- create_S_X1(0) |
- create_Opcode_X1(IMM_0_OPCODE_X1) |
- create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));
-
- return n;
-}
-
-static tilepro_bundle_bits rewrite_load_store_unaligned(
- struct single_step_state *state,
- tilepro_bundle_bits bundle,
- struct pt_regs *regs,
- enum mem_op mem_op,
- int size, int sign_ext)
-{
- unsigned char __user *addr;
- int val_reg, addr_reg, err, val;
- int align_ctl;
-
- align_ctl = unaligned_fixup;
- switch (task_thread_info(current)->align_ctl) {
- case PR_UNALIGN_NOPRINT:
- align_ctl = 1;
- break;
- case PR_UNALIGN_SIGBUS:
- align_ctl = 0;
- break;
- }
-
- /* Get address and value registers */
- if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
- addr_reg = get_SrcA_Y2(bundle);
- val_reg = get_SrcBDest_Y2(bundle);
- } else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
- addr_reg = get_SrcA_X1(bundle);
- val_reg = get_Dest_X1(bundle);
- } else {
- addr_reg = get_SrcA_X1(bundle);
- val_reg = get_SrcB_X1(bundle);
- }
-
- /*
- * If registers are not GPRs, don't try to handle it.
- *
- * FIXME: we could handle non-GPR loads by getting the real value
- * from memory, writing it to the single step buffer, using a
- * temp_reg to hold a pointer to that memory, then executing that
- * instruction and resetting temp_reg. For non-GPR stores, it's a
- * little trickier; we could use the single step buffer for that
- * too, but we'd have to add some more state bits so that we could
- * call back in here to copy that value to the real target. For
- * now, we just handle the simple case.
- */
- if ((val_reg >= PTREGS_NR_GPRS &&
- (val_reg != TREG_ZERO ||
- mem_op == MEMOP_LOAD ||
- mem_op == MEMOP_LOAD_POSTINCR)) ||
- addr_reg >= PTREGS_NR_GPRS)
- return bundle;
-
- /* If it's aligned, don't handle it specially */
- addr = (void __user *)regs->regs[addr_reg];
- if (((unsigned long)addr % size) == 0)
- return bundle;
-
- /*
- * Return SIGBUS with the unaligned address, if requested.
- * Note that we return SIGBUS even for completely invalid addresses
- * as long as they are in fact unaligned; this matches what the
- * tilepro hardware would be doing, if it could provide us with the
- * actual bad address in an SPR, which it doesn't.
- */
- if (align_ctl == 0) {
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_code = BUS_ADRALN;
- info.si_addr = addr;
-
- trace_unhandled_signal("unaligned trap", regs,
- (unsigned long)addr, SIGBUS);
- force_sig_info(info.si_signo, &info, current);
- return (tilepro_bundle_bits) 0;
- }
-
- /* Handle unaligned load/store */
- if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
- unsigned short val_16;
- switch (size) {
- case 2:
- err = copy_from_user(&val_16, addr, sizeof(val_16));
- val = sign_ext ? ((short)val_16) : val_16;
- break;
- case 4:
- err = copy_from_user(&val, addr, sizeof(val));
- break;
- default:
- BUG();
- }
- if (err == 0) {
- state->update_reg = val_reg;
- state->update_value = val;
- state->update = 1;
- }
- } else {
- unsigned short val_16;
- val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
- switch (size) {
- case 2:
- val_16 = val;
- err = copy_to_user(addr, &val_16, sizeof(val_16));
- break;
- case 4:
- err = copy_to_user(addr, &val, sizeof(val));
- break;
- default:
- BUG();
- }
- }
-
- if (err) {
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_code = BUS_ADRALN;
- info.si_addr = addr;
-
- trace_unhandled_signal("bad address for unaligned fixup", regs,
- (unsigned long)addr, SIGBUS);
- force_sig_info(info.si_signo, &info, current);
- return (tilepro_bundle_bits) 0;
- }
-
- if (unaligned_printk || unaligned_fixup_count == 0) {
- pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n",
- current->pid, current->comm, regs->pc,
- mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ?
- "load" : "store",
- (unsigned long)addr);
- if (!unaligned_printk) {
-#define P pr_info
-P("\n");
-P("Unaligned fixups in the kernel will slow your application considerably.\n");
-P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
-P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
-P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
-P("access will become a SIGBUS you can debug. No further warnings will be\n");
-P("shown so as to avoid additional slowdown, but you can track the number\n");
-P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
-P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
-P("\n");
-#undef P
- }
- }
- ++unaligned_fixup_count;
-
- if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
- /* Convert the Y2 instruction to a prefetch. */
- bundle &= ~(create_SrcBDest_Y2(-1) |
- create_Opcode_Y2(-1));
- bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
- create_Opcode_Y2(LW_OPCODE_Y2));
- /* Replace the load postincr with an addi */
- } else if (mem_op == MEMOP_LOAD_POSTINCR) {
- bundle = addi_X1(bundle, addr_reg, addr_reg,
- get_Imm8_X1(bundle));
- /* Replace the store postincr with an addi */
- } else if (mem_op == MEMOP_STORE_POSTINCR) {
- bundle = addi_X1(bundle, addr_reg, addr_reg,
- get_Dest_Imm8_X1(bundle));
- } else {
- /* Convert the X1 instruction to a nop. */
- bundle &= ~(create_Opcode_X1(-1) |
- create_UnShOpcodeExtension_X1(-1) |
- create_UnOpcodeExtension_X1(-1));
- bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
- create_UnShOpcodeExtension_X1(
- UN_0_SHUN_0_OPCODE_X1) |
- create_UnOpcodeExtension_X1(
- NOP_UN_0_SHUN_0_OPCODE_X1));
- }
-
- return bundle;
-}
-
-/*
- * Called after execve() has started the new image. This allows us
- * to reset the info state. Note that the mmap'ed memory, if there
- * was any, has already been unmapped by the exec.
- */
-void single_step_execve(void)
-{
- struct thread_info *ti = current_thread_info();
- kfree(ti->step_state);
- ti->step_state = NULL;
-}
-
-/*
- * single_step_once() - entry point when single stepping has been triggered.
- * @regs: The machine register state
- *
- * When we arrive at this routine via a trampoline, the single step
- * engine copies the executing bundle to the single step buffer.
- * If the instruction is a conditional branch, then the target is
- * reset to one past the next instruction. If the instruction
- * sets the lr, then that is noted. If the instruction is a jump
- * or call, then the new target pc is preserved and the current
- * bundle instruction is replaced with a nop.
- *
- * The necessary post-single-step rewriting information is stored in
- * single_step_state. We use data segment values because the
- * stack will be rewound when we run the rewritten single-stepped
- * instruction.
- */
-void single_step_once(struct pt_regs *regs)
-{
- extern tilepro_bundle_bits __single_step_ill_insn;
- extern tilepro_bundle_bits __single_step_j_insn;
- extern tilepro_bundle_bits __single_step_addli_insn;
- extern tilepro_bundle_bits __single_step_auli_insn;
- struct thread_info *info = (void *)current_thread_info();
- struct single_step_state *state = info->step_state;
- int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
- tilepro_bundle_bits __user *buffer, *pc;
- tilepro_bundle_bits bundle;
- int temp_reg;
- int target_reg = TREG_LR;
- int err;
- enum mem_op mem_op = MEMOP_NONE;
- int size = 0, sign_ext = 0; /* happy compiler */
- int align_ctl;
-
- align_ctl = unaligned_fixup;
- switch (task_thread_info(current)->align_ctl) {
- case PR_UNALIGN_NOPRINT:
- align_ctl = 1;
- break;
- case PR_UNALIGN_SIGBUS:
- align_ctl = 0;
- break;
- }
-
- asm(
-" .pushsection .rodata.single_step\n"
-" .align 8\n"
-" .globl __single_step_ill_insn\n"
-"__single_step_ill_insn:\n"
-" ill\n"
-" .globl __single_step_addli_insn\n"
-"__single_step_addli_insn:\n"
-" { nop; addli r0, zero, 0 }\n"
-" .globl __single_step_auli_insn\n"
-"__single_step_auli_insn:\n"
-" { nop; auli r0, r0, 0 }\n"
-" .globl __single_step_j_insn\n"
-"__single_step_j_insn:\n"
-" j .\n"
-" .popsection\n"
- );
-
- /*
- * Enable interrupts here to allow touching userspace and the like.
- * The callers expect this: do_trap() already has interrupts
- * enabled, and do_work_pending() handles functions that enable
- * interrupts internally.
- */
- local_irq_enable();
-
- if (state == NULL) {
- /* allocate the per-thread single-step state structure */
- state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
- if (state == NULL) {
- pr_err("Out of kernel memory trying to single-step\n");
- return;
- }
-
- /* allocate a cache line of writable, executable memory */
- buffer = (void __user *) vm_mmap(NULL, 0, 64,
- PROT_EXEC | PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS,
- 0);
-
- if (IS_ERR((void __force *)buffer)) {
- kfree(state);
- pr_err("Out of kernel pages trying to single-step\n");
- return;
- }
-
- state->buffer = buffer;
- state->is_enabled = 0;
-
- info->step_state = state;
-
- /* Validate our stored instruction patterns */
- BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
- ADDLI_OPCODE_X1);
- BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
- AULI_OPCODE_X1);
- BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
- BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
- BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
- }
-
- /*
- * If we are returning from a syscall, we still haven't hit the
- * "ill" for the swint1 instruction. So back the PC up to be
- * pointing at the swint1, but we'll actually return directly
- * back to the "ill" so we come back in via SIGILL as if we
- * had "executed" the swint1 without ever being in kernel space.
- */
- if (regs->faultnum == INT_SWINT_1)
- regs->pc -= 8;
-
- pc = (tilepro_bundle_bits __user *)(regs->pc);
- if (get_user(bundle, pc) != 0) {
- pr_err("Couldn't read instruction at %p trying to step\n", pc);
- return;
- }
-
- /* We'll follow the instruction with 2 ill op bundles */
- state->orig_pc = (unsigned long)pc;
- state->next_pc = (unsigned long)(pc + 1);
- state->branch_next_pc = 0;
- state->update = 0;
-
- if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
- /* two wide, check for control flow */
- int opcode = get_Opcode_X1(bundle);
-
- switch (opcode) {
- /* branches */
- case BRANCH_OPCODE_X1:
- {
- s32 offset = signExtend17(get_BrOff_X1(bundle));
-
- /*
- * For branches, we use a rewriting trick to let the
- * hardware evaluate whether the branch is taken or
- * untaken. We record the target offset and then
- * rewrite the branch instruction to target 1 insn
- * ahead if the branch is taken. We then follow the
- * rewritten branch with two bundles, each containing
- * an "ill" instruction. The supervisor examines the
- * pc after the single step code is executed, and if
- * the pc is the first ill instruction, then the
- * branch (if any) was not taken. If the pc is the
- * second ill instruction, then the branch was
- * taken. The new pc is computed for these cases, and
- * inserted into the registers for the thread. If
- * the pc is the start of the single step code, then
- * an exception or interrupt was taken before the
- * code started processing, and the same "original"
- * pc is restored. This change, different from the
- * original implementation, has the advantage of
- * executing a single user instruction.
- */
- state->branch_next_pc = (unsigned long)(pc + offset);
-
- /* rewrite the branch so, if taken, it lands on the second ill bundle */
- bundle = set_BrOff_X1(bundle, 2);
- }
- break;
-
- /* jumps */
- case JALB_OPCODE_X1:
- case JALF_OPCODE_X1:
- state->update = 1;
- state->next_pc =
- (unsigned long) (pc + get_JOffLong_X1(bundle));
- break;
-
- case JB_OPCODE_X1:
- case JF_OPCODE_X1:
- state->next_pc =
- (unsigned long) (pc + get_JOffLong_X1(bundle));
- bundle = nop_X1(bundle);
- break;
-
- case SPECIAL_0_OPCODE_X1:
- switch (get_RRROpcodeExtension_X1(bundle)) {
- /* jump-register */
- case JALRP_SPECIAL_0_OPCODE_X1:
- case JALR_SPECIAL_0_OPCODE_X1:
- state->update = 1;
- state->next_pc =
- regs->regs[get_SrcA_X1(bundle)];
- break;
-
- case JRP_SPECIAL_0_OPCODE_X1:
- case JR_SPECIAL_0_OPCODE_X1:
- state->next_pc =
- regs->regs[get_SrcA_X1(bundle)];
- bundle = nop_X1(bundle);
- break;
-
- case LNK_SPECIAL_0_OPCODE_X1:
- state->update = 1;
- target_reg = get_Dest_X1(bundle);
- break;
-
- /* stores */
- case SH_SPECIAL_0_OPCODE_X1:
- mem_op = MEMOP_STORE;
- size = 2;
- break;
-
- case SW_SPECIAL_0_OPCODE_X1:
- mem_op = MEMOP_STORE;
- size = 4;
- break;
- }
- break;
-
- /* loads and iret */
- case SHUN_0_OPCODE_X1:
- if (get_UnShOpcodeExtension_X1(bundle) ==
- UN_0_SHUN_0_OPCODE_X1) {
- switch (get_UnOpcodeExtension_X1(bundle)) {
- case LH_UN_0_SHUN_0_OPCODE_X1:
- mem_op = MEMOP_LOAD;
- size = 2;
- sign_ext = 1;
- break;
-
- case LH_U_UN_0_SHUN_0_OPCODE_X1:
- mem_op = MEMOP_LOAD;
- size = 2;
- sign_ext = 0;
- break;
-
- case LW_UN_0_SHUN_0_OPCODE_X1:
- mem_op = MEMOP_LOAD;
- size = 4;
- break;
-
- case IRET_UN_0_SHUN_0_OPCODE_X1:
- {
- unsigned long ex0_0 = __insn_mfspr(
- SPR_EX_CONTEXT_0_0);
- unsigned long ex0_1 = __insn_mfspr(
- SPR_EX_CONTEXT_0_1);
- /*
- * Special-case it if we're iret'ing
- * to PL0 again. Otherwise just let
- * it run and it will generate SIGILL.
- */
- if (EX1_PL(ex0_1) == USER_PL) {
- state->next_pc = ex0_0;
- regs->ex1 = ex0_1;
- bundle = nop_X1(bundle);
- }
- }
- }
- }
- break;
-
- /* postincrement operations */
- case IMM_0_OPCODE_X1:
- switch (get_ImmOpcodeExtension_X1(bundle)) {
- case LWADD_IMM_0_OPCODE_X1:
- mem_op = MEMOP_LOAD_POSTINCR;
- size = 4;
- break;
-
- case LHADD_IMM_0_OPCODE_X1:
- mem_op = MEMOP_LOAD_POSTINCR;
- size = 2;
- sign_ext = 1;
- break;
-
- case LHADD_U_IMM_0_OPCODE_X1:
- mem_op = MEMOP_LOAD_POSTINCR;
- size = 2;
- sign_ext = 0;
- break;
-
- case SWADD_IMM_0_OPCODE_X1:
- mem_op = MEMOP_STORE_POSTINCR;
- size = 4;
- break;
-
- case SHADD_IMM_0_OPCODE_X1:
- mem_op = MEMOP_STORE_POSTINCR;
- size = 2;
- break;
-
- default:
- break;
- }
- break;
- }
-
- if (state->update) {
- /*
- * Get an available register. We start with a
- * bitmask with 1's for available registers.
- * We truncate to the low 32 registers since
- * we are guaranteed to have set bits in the
- * low 32 bits, then use ctz to pick the first.
- */
- u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
- (1ULL << get_SrcA_X0(bundle)) |
- (1ULL << get_SrcB_X0(bundle)) |
- (1ULL << target_reg));
- temp_reg = __builtin_ctz(mask);
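- /*
- * Preload the scratch reg with the link address; the bundle's X1
- * slot is rewritten to "move target, scratch", and update_reg/
- * update_value restore the scratch reg after the step completes.
- */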
- state->update_reg = temp_reg;
- state->update_value = regs->regs[temp_reg];
- regs->regs[temp_reg] = (unsigned long) (pc+1);
- regs->flags |= PT_FLAGS_RESTORE_REGS;
- bundle = move_X1(bundle, target_reg, temp_reg);
- }
- } else {
- int opcode = get_Opcode_Y2(bundle);
-
- switch (opcode) {
- /* loads */
- case LH_OPCODE_Y2:
- mem_op = MEMOP_LOAD;
- size = 2;
- sign_ext = 1;
- break;
-
- case LH_U_OPCODE_Y2:
- mem_op = MEMOP_LOAD;
- size = 2;
- sign_ext = 0;
- break;
-
- case LW_OPCODE_Y2:
- mem_op = MEMOP_LOAD;
- size = 4;
- break;
-
- /* stores */
- case SH_OPCODE_Y2:
- mem_op = MEMOP_STORE;
- size = 2;
- break;
-
- case SW_OPCODE_Y2:
- mem_op = MEMOP_STORE;
- size = 4;
- break;
- }
- }
-
- /*
- * Check if we need to rewrite an unaligned load/store.
- * A returned bundle of zero means we generated a signal instead.
- */
- if (mem_op != MEMOP_NONE && align_ctl >= 0) {
- bundle = rewrite_load_store_unaligned(state, bundle, regs,
- mem_op, size, sign_ext);
- if (bundle == 0)
- return;
- }
-
- /* write the bundle to our execution area */
- buffer = state->buffer;
- err = __put_user(bundle, buffer++);
-
- /*
- * If we're really single-stepping, we take an INT_ILL after.
- * If we're just handling an unaligned access, we can just
- * jump directly back to where we were in user code.
- */
- if (is_single_step) {
- err |= __put_user(__single_step_ill_insn, buffer++);
- err |= __put_user(__single_step_ill_insn, buffer++);
- } else {
- long delta;
-
- if (state->update) {
- /* We have some state to update; do it inline */
- int ha16;
- bundle = __single_step_addli_insn;
- bundle |= create_Dest_X1(state->update_reg);
- bundle |= create_Imm16_X1(state->update_value);
- err |= __put_user(bundle, buffer++);
- bundle = __single_step_auli_insn;
- bundle |= create_Dest_X1(state->update_reg);
- bundle |= create_SrcA_X1(state->update_reg);
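- /* addli sign-extends its immediate, so bias the high half by 0x8000. */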
- ha16 = (state->update_value + 0x8000) >> 16;
- bundle |= create_Imm16_X1(ha16);
- err |= __put_user(bundle, buffer++);
- state->update = 0;
- }
-
- /* End with a jump back to the next instruction */
- delta = ((regs->pc + TILEPRO_BUNDLE_SIZE_IN_BYTES) -
- (unsigned long)buffer) >>
- TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
- bundle = __single_step_j_insn;
- bundle |= create_JOffLong_X1(delta);
- err |= __put_user(bundle, buffer++);
- }
-
- if (err) {
- pr_err("Fault when writing to single-step buffer\n");
- return;
- }
-
- /*
- * Flush the buffer.
- * We do a local flush only, since this is a thread-specific buffer.
- */
- __flush_icache_range((unsigned long)state->buffer,
- (unsigned long)buffer);
-
- /* Indicate enabled */
- state->is_enabled = is_single_step;
- regs->pc = (unsigned long)state->buffer;
-
- /* Fault immediately if we are coming back from a syscall. */
- if (regs->faultnum == INT_SWINT_1)
- regs->pc += 8;
-}
-
-#else
-
-static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
-
-
-/*
- * Called directly on the occasion of an interrupt.
- *
- * If the process doesn't have single step set, then we use this as an
- * opportunity to turn single step off.
- *
- * It has been mentioned that we could conditionally turn off single stepping
- * on each entry into the kernel and rely on single_step_once to turn it
- * on for the processes that matter (as we already do), but this
- * implementation is somewhat more efficient in that we muck with registers
- * once on a bum interrupt rather than on every entry into the kernel.
- *
- * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
- * so we have to run through this process again before we can say that an
- * instruction has executed.
- *
- * swint will set CANCELED, but it's a legitimate instruction. Fortunately
- * it changes the PC. If it hasn't changed, then we know that the interrupt
- * wasn't generated by swint and we'll need to run this process again before
- * we can say an instruction has executed.
- *
- * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
- * on with our lives.
- */
-
-void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
-{
- unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
- struct thread_info *info = (void *)current_thread_info();
- int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
- unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
-
- if (is_single_step == 0) {
- __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
-
- } else if ((*ss_pc != regs->pc) ||
- (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
-
- control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
- control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
- __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
- send_sigtrap(current, regs);
- }
-}
-
-
-/*
- * Called from need_singlestep. Set up the control registers and the enable
- * register, then return back.
- */
-
-void single_step_once(struct pt_regs *regs)
-{
- unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
- unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
-
- *ss_pc = regs->pc;
- control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
- control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
- __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
- __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
-}
-
-void single_step_execve(void)
-{
- /* Nothing */
-}
-
-#endif /* !__tilegx__ */
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
deleted file mode 100644
index 94a62e1197ce..000000000000
--- a/arch/tile/kernel/smp.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * TILE SMP support routines.
- */
-
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/irq_work.h>
-#include <linux/module.h>
-#include <asm/cacheflush.h>
-#include <asm/homecache.h>
-
-/*
- * We write to width and height with a single store in head_NN.S,
- * so make the variable aligned to "long".
- */
-HV_Topology smp_topology __ro_after_init __aligned(sizeof(long));
-EXPORT_SYMBOL(smp_topology);
-
-#if CHIP_HAS_IPI()
-static unsigned long __iomem *ipi_mappings[NR_CPUS];
-#endif
-
-/* Does messaging work correctly to the local cpu? */
-bool self_interrupt_ok;
-
-/*
- * Top-level send_IPI*() functions to send messages to other cpus.
- */
-
-/* Set by smp_send_stop() to avoid recursive panics. */
-static int stopping_cpus;
-
-static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
-{
- int sent = 0;
- while (sent < nrecip) {
- int rc = hv_send_message(recip, nrecip,
- (HV_VirtAddr)&tag, sizeof(tag));
- if (rc < 0) {
- if (!stopping_cpus) /* avoid recursive panic */
- panic("hv_send_message returned %d", rc);
- break;
- }
- WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
- sent += rc;
- }
-}
-
-void send_IPI_single(int cpu, int tag)
-{
- HV_Recipient recip = {
- .y = cpu / smp_width,
- .x = cpu % smp_width,
- .state = HV_TO_BE_SENT
- };
- __send_IPI_many(&recip, 1, tag);
-}
-
-void send_IPI_many(const struct cpumask *mask, int tag)
-{
- HV_Recipient recip[NR_CPUS];
- int cpu;
- int nrecip = 0;
- int my_cpu = smp_processor_id();
- for_each_cpu(cpu, mask) {
- HV_Recipient *r;
- BUG_ON(cpu == my_cpu);
- r = &recip[nrecip++];
- r->y = cpu / smp_width;
- r->x = cpu % smp_width;
- r->state = HV_TO_BE_SENT;
- }
- __send_IPI_many(recip, nrecip, tag);
-}
-
-void send_IPI_allbutself(int tag)
-{
- struct cpumask mask;
- cpumask_copy(&mask, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), &mask);
- send_IPI_many(&mask, tag);
-}
-
-/*
- * Functions related to starting/stopping cpus.
- */
-
-/* Handler to start the current cpu. */
-static void smp_start_cpu_interrupt(void)
-{
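- /* Rewriting the saved PC makes the interrupt return jump into the start routine. */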
- get_irq_regs()->pc = start_cpu_function_addr;
-}
-
-/* Handler to stop the current cpu. */
-static void smp_stop_cpu_interrupt(void)
-{
- arch_local_irq_disable_all();
- set_cpu_online(smp_processor_id(), 0);
- for (;;)
- asm("nap; nop");
-}
-
-/* This function calls the 'stop' function on all other CPUs in the system. */
-void smp_send_stop(void)
-{
- stopping_cpus = 1;
- send_IPI_allbutself(MSG_TAG_STOP_CPU);
-}
-
-/* On panic, just wait; we may get an smp_send_stop() later on. */
-void panic_smp_self_stop(void)
-{
- while (1)
- asm("nap; nop");
-}
-
-/*
- * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
- */
-void evaluate_message(int tag)
-{
- switch (tag) {
- case MSG_TAG_START_CPU: /* Start up a cpu */
- smp_start_cpu_interrupt();
- break;
-
- case MSG_TAG_STOP_CPU: /* Sent to shut down slave CPUs */
- smp_stop_cpu_interrupt();
- break;
-
- case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */
- generic_smp_call_function_interrupt();
- break;
-
- case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */
- generic_smp_call_function_single_interrupt();
- break;
-
- case MSG_TAG_IRQ_WORK: /* Invoke IRQ work */
- irq_work_run();
- break;
-
- default:
- panic("Unknown IPI message tag %d", tag);
- break;
- }
-}
-
-
-/*
- * flush_icache_range() code uses smp_call_function().
- */
-
-struct ipi_flush {
- unsigned long start;
- unsigned long end;
-};
-
-static void ipi_flush_icache_range(void *info)
-{
- struct ipi_flush *flush = (struct ipi_flush *) info;
- __flush_icache_range(flush->start, flush->end);
-}
-
-void flush_icache_range(unsigned long start, unsigned long end)
-{
- struct ipi_flush flush = { start, end };
-
- /* If invoked with irqs disabled, we cannot issue IPIs. */
- if (irqs_disabled())
- flush_remote(0, HV_FLUSH_EVICT_L1I, NULL, 0, 0, 0,
- NULL, NULL, 0);
- else {
- preempt_disable();
- on_each_cpu(ipi_flush_icache_range, &flush, 1);
- preempt_enable();
- }
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-
-#ifdef CONFIG_IRQ_WORK
-void arch_irq_work_raise(void)
-{
- if (arch_irq_work_has_interrupt())
- send_IPI_single(smp_processor_id(), MSG_TAG_IRQ_WORK);
-}
-#endif
-
-
-/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
-static irqreturn_t handle_reschedule_ipi(int irq, void *token)
-{
- __this_cpu_inc(irq_stat.irq_resched_count);
- scheduler_ipi();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction resched_action = {
- .handler = handle_reschedule_ipi,
- .name = "resched",
- .dev_id = handle_reschedule_ipi /* unique token */,
-};
-
-void __init ipi_init(void)
-{
- int cpu = smp_processor_id();
- HV_Recipient recip = { .y = cpu_y(cpu), .x = cpu_x(cpu),
- .state = HV_TO_BE_SENT };
- int tag = MSG_TAG_CALL_FUNCTION_SINGLE;
-
- /*
- * Test if we can message ourselves for arch_irq_work_raise.
- * This functionality is only available in the Tilera hypervisor
- * in versions 4.3.4 and following.
- */
- if (hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag)) == 1)
- self_interrupt_ok = true;
- else
- pr_warn("Older hypervisor: disabling fast irq_work_raise\n");
-
-#if CHIP_HAS_IPI()
- /* Map IPI trigger MMIO addresses. */
- for_each_possible_cpu(cpu) {
- HV_Coord tile;
- HV_PTE pte;
- unsigned long offset;
-
- tile.x = cpu_x(cpu);
- tile.y = cpu_y(cpu);
- if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
- panic("Failed to initialize IPI for cpu %d\n", cpu);
-
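- /* Map the tile's IPI trigger page using the PTE attributes from the hypervisor. */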
- offset = PFN_PHYS(pte_pfn(pte));
- ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
- }
-#endif
-
- /* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */
- tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU);
- BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
-}
-
-#if CHIP_HAS_IPI()
-
-void smp_send_reschedule(int cpu)
-{
- WARN_ON(cpu_is_offline(cpu));
-
- /*
- * We just want to do an MMIO store. The traditional writeq()
- * functions aren't really correct here, since they're always
- * directed at the PCI shim. For now, just do a raw store,
- * casting away the __iomem attribute.
- */
- ((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
-}
-
-#else
-
-void smp_send_reschedule(int cpu)
-{
- HV_Coord coord;
-
- WARN_ON(cpu_is_offline(cpu));
-
- coord.y = cpu_y(cpu);
- coord.x = cpu_x(cpu);
- hv_trigger_ipi(coord, IRQ_RESCHEDULE);
-}
-
-#endif /* CHIP_HAS_IPI() */
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
deleted file mode 100644
index 869c22e57561..000000000000
--- a/arch/tile/kernel/smpboot.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/task.h>
-#include <linux/kernel_stat.h>
-#include <linux/bootmem.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/percpu.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
-#include <asm/sections.h>
-
-/* State of each CPU. */
-static DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
-/* The messaging code jumps to this pointer during boot-up */
-unsigned long start_cpu_function_addr;
-
-/* Called very early during startup to mark boot cpu as online */
-void __init smp_prepare_boot_cpu(void)
-{
- int cpu = smp_processor_id();
- set_cpu_online(cpu, 1);
- set_cpu_present(cpu, 1);
- __this_cpu_write(cpu_state, CPU_ONLINE);
-
- init_messaging();
-}
-
-static void start_secondary(void);
-
-/*
- * Called at the top of init() to launch all the other CPUs.
- * They run free to complete their initialization and then wait
- * until they get an IPI from the boot cpu to come online.
- */
-void __init smp_prepare_cpus(unsigned int max_cpus)
-{
- long rc;
- int cpu, cpu_count;
- int boot_cpu = smp_processor_id();
-
- current_thread_info()->cpu = boot_cpu;
-
- /*
- * Pin this task to the boot CPU while we bring up the others,
- * just to make sure we don't uselessly migrate as they come up.
- */
- rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
- if (rc != 0)
- pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);
-
- /* Print information about disabled and dataplane cpus. */
- print_disabled_cpus();
-
- /*
- * Tell the messaging subsystem how to respond to the
- * startup message. We use a level of indirection to avoid
- * confusing the linker with the fact that the messaging
- * subsystem is calling __init code.
- */
- start_cpu_function_addr = (unsigned long) &online_secondary;
-
- /* Set up thread context for all new processors. */
- cpu_count = 1;
- for (cpu = 0; cpu < NR_CPUS; ++cpu) {
- struct task_struct *idle;
-
- if (cpu == boot_cpu)
- continue;
-
- if (!cpu_possible(cpu)) {
- /*
- * Make this processor do nothing on boot.
- * Note that we don't give the boot_pc function
- * a stack, so it has to be assembly code.
- */
- per_cpu(boot_sp, cpu) = 0;
- per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
- continue;
- }
-
- /* Create a new idle thread to run start_secondary() */
- idle = fork_idle(cpu);
- if (IS_ERR(idle))
- panic("failed fork for CPU %d", cpu);
- idle->thread.pc = (unsigned long) start_secondary;
-
- /* Make this thread the boot thread for this processor */
- per_cpu(boot_sp, cpu) = task_ksp0(idle);
- per_cpu(boot_pc, cpu) = idle->thread.pc;
-
- ++cpu_count;
- }
- BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));
-
- /* Fire up the other tiles, if any */
- init_cpu_present(cpu_possible_mask);
- if (cpumask_weight(cpu_present_mask) > 1) {
- mb(); /* make sure all data is visible to new processors */
- hv_start_all_tiles();
- }
-}
-
-static __initdata struct cpumask init_affinity;
-
-static __init int reset_init_affinity(void)
-{
- long rc = sched_setaffinity(current->pid, &init_affinity);
- if (rc != 0)
- pr_warn("couldn't reset init affinity (%ld)\n", rc);
- return 0;
-}
-late_initcall(reset_init_affinity);
-
-static struct cpumask cpu_started;
-
-/*
- * Activate a secondary processor. Very minimal; don't add anything
- * to this path without knowing what you're doing, since SMP booting
- * is pretty fragile.
- */
-static void start_secondary(void)
-{
- int cpuid;
-
- preempt_disable();
-
- cpuid = smp_processor_id();
-
- /* Set our thread pointer appropriately. */
- set_my_cpu_offset(__per_cpu_offset[cpuid]);
-
- /*
- * On large machines even this will slow us down, since we
- * will be contending for the printk spinlock.
- */
- /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */
-
- /* Initialize the current asid for our first page table. */
- __this_cpu_write(current_asid, min_asid);
-
- /* Set up this thread as another owner of the init_mm */
- mmgrab(&init_mm);
- current->active_mm = &init_mm;
- if (current->mm)
- BUG();
- enter_lazy_tlb(&init_mm, current);
-
- /* Allow hypervisor messages to be received */
- init_messaging();
- local_irq_enable();
-
- /* Indicate that we're ready to come up. */
- /* Must not do this before we're ready to receive messages */
- if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
- pr_warn("CPU#%d already started!\n", cpuid);
- for (;;)
- local_irq_enable();
- }
-
- smp_nap();
-}
-
-/*
- * Bring a secondary processor online.
- */
-void online_secondary(void)
-{
- /*
- * low-memory mappings have been cleared, flush them from
- * the local TLBs too.
- */
- local_flush_tlb();
-
- BUG_ON(in_interrupt());
-
- /* This must be done before setting cpu_online_mask */
- wmb();
-
- notify_cpu_starting(smp_processor_id());
-
- set_cpu_online(smp_processor_id(), 1);
- __this_cpu_write(cpu_state, CPU_ONLINE);
-
- /* Set up tile-specific state for this cpu. */
- setup_cpu(0);
-
- /* Set up tile-timer clock-event device on this cpu */
- setup_tile_timer();
-
- cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
-}
-
-int __cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
- /* Wait up to 5s total for all CPUs to come online */
- static int timeout;
- for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
- if (timeout >= 50000) {
- pr_info("skipping unresponsive cpu%d\n", cpu);
- local_irq_enable();
- return -EIO;
- }
- udelay(100);
- }
-
- local_irq_enable();
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-
- /* Unleash the CPU! */
- send_IPI_single(cpu, MSG_TAG_START_CPU);
- while (!cpumask_test_cpu(cpu, cpu_online_mask))
- cpu_relax();
- return 0;
-}
-
-static void panic_start_cpu(void)
-{
- panic("Received a MSG_START_CPU IPI after boot finished.");
-}
-
-void __init smp_cpus_done(unsigned int max_cpus)
-{
- int cpu, next, rc;
-
- /* Reset the response to a (now illegal) MSG_START_CPU IPI. */
- start_cpu_function_addr = (unsigned long) &panic_start_cpu;
-
- cpumask_copy(&init_affinity, cpu_online_mask);
-
- /*
- * Pin ourselves to a single cpu in the initial affinity set
- * so that kernel mappings for the rootfs are not in the dataplane
- * (if one is set), and to avoid unnecessary migration during bringup.
- * Use the last cpu just in case the whole chip has been
- * isolated from the scheduler, to keep init away from likely
- * more useful user code. This also ensures that work scheduled
- * via schedule_delayed_work() in the init routines will land
- * on this cpu.
- */
- for (cpu = cpumask_first(&init_affinity);
- (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids;
- cpu = next)
- ;
- rc = sched_setaffinity(current->pid, cpumask_of(cpu));
- if (rc != 0)
- pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
-}
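
The empty-bodied for loop at the end of smp_cpus_done() walks the mask until cpumask_next() runs off the end, leaving cpu at the last set bit. A standalone model with a plain bitmask (next_bit() and first_bit() are illustrative stand-ins for cpumask_next()/cpumask_first()):

#include <stdio.h>

#define NR_BITS 32

static int next_bit(unsigned long mask, int bit)
{
	for (bit++; bit < NR_BITS; bit++)
		if (mask & (1UL << bit))
			return bit;
	return NR_BITS;	/* like cpumask_next() returning nr_cpu_ids */
}

static int first_bit(unsigned long mask) { return next_bit(mask, -1); }

int main(void)
{
	unsigned long online = 0x16;	/* cpus 1, 2, 4 */
	int cpu, next;

	/* Same shape as the kernel loop: advance until next falls off the end. */
	for (cpu = first_bit(online);
	     (next = next_bit(online, cpu)) < NR_BITS;
	     cpu = next)
		;
	printf("last online cpu: %d\n", cpu);	/* prints 4 */
	return 0;
}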
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
deleted file mode 100644
index 94ecbc6676e5..000000000000
--- a/arch/tile/kernel/stack.c
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task_stack.h>
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/module.h>
-#include <linux/pfn.h>
-#include <linux/kallsyms.h>
-#include <linux/stacktrace.h>
-#include <linux/uaccess.h>
-#include <linux/mmzone.h>
-#include <linux/dcache.h>
-#include <linux/fs.h>
-#include <linux/hardirq.h>
-#include <linux/string.h>
-#include <asm/backtrace.h>
-#include <asm/page.h>
-#include <asm/ucontext.h>
-#include <asm/switch_to.h>
-#include <asm/sigframe.h>
-#include <asm/stack.h>
-#include <asm/vdso.h>
-#include <arch/abi.h>
-#include <arch/interrupts.h>
-
-#define KBT_ONGOING 0 /* Backtrace still ongoing */
-#define KBT_DONE 1 /* Backtrace cleanly completed */
-#define KBT_RUNNING 2 /* Can't run backtrace on a running task */
-#define KBT_LOOP 3 /* Backtrace entered a loop */
-
-/* Is address on the specified kernel stack? */
-static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
-{
- ulong kstack_base = (ulong) kbt->task->stack;
- if (kstack_base == 0) /* corrupt task pointer; just follow stack... */
- return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
- return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
-}
-
-/* Callback for backtracer; basically a glorified memcpy */
-static bool read_memory_func(void *result, unsigned long address,
- unsigned int size, void *vkbt)
-{
- int retval;
- struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
-
- if (address == 0)
- return 0;
- if (__kernel_text_address(address)) {
- /* OK to read kernel code. */
- } else if (address >= PAGE_OFFSET) {
- /* We only tolerate kernel-space reads of this task's stack */
- if (!in_kernel_stack(kbt, address))
- return 0;
- } else if (!kbt->is_current) {
- return 0; /* can't read from other user address spaces */
- }
- pagefault_disable();
- retval = __copy_from_user_inatomic(result,
- (void __user __force *)address,
- size);
- pagefault_enable();
- return (retval == 0);
-}
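
A standalone sketch of the guarded-read callback idea: the backtracer only dereferences memory through one function, which enforces a whitelist of ranges. Here the kernel's pagefault_disable()/__copy_from_user_inatomic() pair is replaced by a simple bounds check, so this models the policy, not the fault handling:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct walker {
	const char *stack_base;	/* the one region we allow reads from */
	size_t stack_size;
};

static bool read_mem(void *result, const void *addr, size_t size, void *ctx)
{
	const struct walker *w = ctx;
	const char *p = addr;

	if (p < w->stack_base || p + size > w->stack_base + w->stack_size)
		return false;	/* outside the window we allow */
	memcpy(result, p, size);
	return true;
}

int main(void)
{
	char stack[64] = "frame data";
	struct walker w = { stack, sizeof(stack) };
	char buf[8];

	printf("in range: %d\n", read_mem(buf, stack + 4, 4, &w));
	printf("out of range: %d\n", read_mem(buf, stack + 62, 4, &w));
	return 0;
}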
-
-/* Return a pt_regs pointer for a valid fault handler frame */
-static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
-{
- char fault[64];
- unsigned long sp = kbt->it.sp;
- struct pt_regs *p;
-
- if (sp % sizeof(long) != 0)
- return NULL;
- if (!in_kernel_stack(kbt, sp))
- return NULL;
- if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
- return NULL;
- p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
- if (kbt->verbose) { /* else we aren't going to use it */
- if (p->faultnum == INT_SWINT_1 ||
- p->faultnum == INT_SWINT_1_SIGRETURN)
- snprintf(fault, sizeof(fault),
- "syscall %ld", p->regs[TREG_SYSCALL_NR]);
- else
- snprintf(fault, sizeof(fault),
- "interrupt %ld", p->faultnum);
- }
- if (EX1_PL(p->ex1) == KERNEL_PL &&
- __kernel_text_address(p->pc) &&
- in_kernel_stack(kbt, p->sp) &&
- p->sp >= sp) {
- if (kbt->verbose)
- pr_err(" <%s while in kernel mode>\n", fault);
- } else if (user_mode(p) &&
- p->sp < PAGE_OFFSET && p->sp != 0) {
- if (kbt->verbose)
- pr_err(" <%s while in user mode>\n", fault);
- } else {
- if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
- pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
- p->pc, p->sp, p->ex1);
- return NULL;
- }
- if (kbt->profile && ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) != 0)
- return NULL;
- return p;
-}
-
-/* Is the iterator pointing to a sigreturn trampoline? */
-static int is_sigreturn(struct KBacktraceIterator *kbt)
-{
- return kbt->task->mm &&
- (kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
- (ulong)&__vdso_rt_sigreturn));
-}
-
-/* Return a pt_regs pointer for a valid signal handler frame */
-static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
- struct rt_sigframe* kframe)
-{
- BacktraceIterator *b = &kbt->it;
-
- if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
- b->sp % sizeof(long) == 0) {
- int retval;
- pagefault_disable();
- retval = __copy_from_user_inatomic(
- kframe, (void __user __force *)b->sp,
- sizeof(*kframe));
- pagefault_enable();
- if (retval != 0 ||
- (unsigned int)(kframe->info.si_signo) >= _NSIG)
- return NULL;
- if (kbt->verbose) {
- pr_err(" <received signal %d>\n",
- kframe->info.si_signo);
- }
- return (struct pt_regs *)&kframe->uc.uc_mcontext;
- }
- return NULL;
-}
-
-static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
-{
- struct pt_regs *p;
- struct rt_sigframe kframe;
-
- p = valid_fault_handler(kbt);
- if (p == NULL)
- p = valid_sigframe(kbt, &kframe);
- if (p == NULL)
- return 0;
- backtrace_init(&kbt->it, read_memory_func, kbt,
- p->pc, p->lr, p->sp, p->regs[52]);
- kbt->new_context = 1;
- return 1;
-}
-
-/* Find a frame that isn't a sigreturn, if there is one. */
-static int KBacktraceIterator_next_item_inclusive(
- struct KBacktraceIterator *kbt)
-{
- for (;;) {
- do {
- if (!is_sigreturn(kbt))
- return KBT_ONGOING;
- } while (backtrace_next(&kbt->it));
-
- if (!KBacktraceIterator_restart(kbt))
- return KBT_DONE;
- }
-}
-
-/*
- * If the current sp is on a page different than what we recorded
- * as the top-of-kernel-stack last time we context switched, we have
- * probably blown the stack, and nothing is going to work out well.
- * If we can at least get out a warning, that may help debugging,
- * though we probably won't be able to backtrace into the code that
- * actually did the recursive damage.
- */
-static void validate_stack(struct pt_regs *regs)
-{
- int cpu = raw_smp_processor_id();
- unsigned long ksp0 = get_current_ksp0();
- unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
- unsigned long sp = stack_pointer;
-
- if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
- pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
- " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
- cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
- }
-
- else if (sp < ksp0_base + sizeof(struct thread_info)) {
- pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
- " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
- cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
- }
-}
-
-void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
- struct task_struct *t, struct pt_regs *regs)
-{
- unsigned long pc, lr, sp, r52;
- int is_current;
-
- /*
- * Set up callback information. We grab the kernel stack base
- * so we will allow reads of that address range.
- */
- is_current = (t == NULL || t == current);
- kbt->is_current = is_current;
- if (is_current)
- t = validate_current();
- kbt->task = t;
- kbt->verbose = 0; /* override in caller if desired */
- kbt->profile = 0; /* override in caller if desired */
- kbt->end = KBT_ONGOING;
- kbt->new_context = 1;
- if (is_current)
- validate_stack(regs);
-
- if (regs == NULL) {
- if (is_current || t->state == TASK_RUNNING) {
- /* Can't do this; we need registers */
- kbt->end = KBT_RUNNING;
- return;
- }
- pc = get_switch_to_pc();
- lr = t->thread.pc;
- sp = t->thread.ksp;
- r52 = 0;
- } else {
- pc = regs->pc;
- lr = regs->lr;
- sp = regs->sp;
- r52 = regs->regs[52];
- }
-
- backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
- kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
-}
-EXPORT_SYMBOL(KBacktraceIterator_init);
-
-int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
-{
- return kbt->end != KBT_ONGOING;
-}
-EXPORT_SYMBOL(KBacktraceIterator_end);
-
-void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
-{
- unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
- kbt->new_context = 0;
- if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
- kbt->end = KBT_DONE;
- return;
- }
- kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
- if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
- /* Trapped in a loop; give up. */
- kbt->end = KBT_LOOP;
- }
-}
-EXPORT_SYMBOL(KBacktraceIterator_next);
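
The (old_pc, old_sp) comparison above is what turns a self-referential frame chain into a clean KBT_LOOP exit instead of an infinite loop. A minimal standalone model of that check:

#include <stdio.h>

struct frame {
	unsigned long pc;
	struct frame *next;	/* stands in for the saved-sp chain */
};

int main(void)
{
	struct frame f2 = { 0x2000, 0 };
	struct frame f1 = { 0x1000, &f2 };

	f2.next = &f2;		/* corrupt stack: frame points at itself */

	for (struct frame *f = &f1; f; ) {
		struct frame *next = f->next;

		printf("frame pc %#lx\n", f->pc);
		if (next == f) {	/* same frame again: the KBT_LOOP case */
			puts("stack walk entered a loop; giving up");
			break;
		}
		f = next;
	}
	return 0;
}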
-
-static void describe_addr(struct KBacktraceIterator *kbt,
- unsigned long address,
- int have_mmap_sem, char *buf, size_t bufsize)
-{
- struct vm_area_struct *vma;
- size_t namelen, remaining;
- unsigned long size, offset, adjust;
- char *p, *modname;
- const char *name;
- int rc;
-
- /*
- * Look one byte back for every caller frame (i.e. those that
- * aren't a new context) so we look up symbol data for the
- * call itself, not the following instruction, which may be on
- * a different line (or in a different function).
- */
- adjust = !kbt->new_context;
- address -= adjust;
-
- if (address >= PAGE_OFFSET) {
- /* Handle kernel symbols. */
- BUG_ON(bufsize < KSYM_NAME_LEN);
- name = kallsyms_lookup(address, &size, &offset,
- &modname, buf);
- if (name == NULL) {
- buf[0] = '\0';
- return;
- }
- namelen = strlen(buf);
- remaining = (bufsize - 1) - namelen;
- p = buf + namelen;
- rc = snprintf(p, remaining, "+%#lx/%#lx ",
- offset + adjust, size);
- if (modname && rc < remaining)
- snprintf(p + rc, remaining - rc, "[%s] ", modname);
- buf[bufsize-1] = '\0';
- return;
- }
-
- /* If we don't have the mmap_sem, we can't show any more info. */
- buf[0] = '\0';
- if (!have_mmap_sem)
- return;
-
- /* Find vma info. */
- vma = find_vma(kbt->task->mm, address);
- if (vma == NULL || address < vma->vm_start) {
- snprintf(buf, bufsize, "[unmapped address] ");
- return;
- }
-
- if (vma->vm_file) {
- p = file_path(vma->vm_file, buf, bufsize);
- if (IS_ERR(p))
- p = "?";
- name = kbasename(p);
- } else {
- name = "anon";
- }
-
- /* Generate a string description of the vma info. */
- namelen = strlen(name);
- remaining = (bufsize - 1) - namelen;
- memmove(buf, name, namelen);
- snprintf(buf + namelen, remaining, "[%lx+%lx] ",
- vma->vm_start, vma->vm_end - vma->vm_start);
-}
-
-/*
- * Avoid possible crash recursion during backtrace. If it happens, it
- * makes it easy to lose the actual root cause of the failure, so we
- * put a simple guard on all the backtrace loops.
- */
-static bool start_backtrace(void)
-{
- if (current_thread_info()->in_backtrace) {
- pr_err("Backtrace requested while in backtrace!\n");
- return false;
- }
- current_thread_info()->in_backtrace = true;
- return true;
-}
-
-static void end_backtrace(void)
-{
- current_thread_info()->in_backtrace = false;
-}
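
A standalone model of this guard, using a C11 _Thread_local flag in place of the thread_info field:

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool in_backtrace;

static bool start_backtrace(void)
{
	if (in_backtrace) {
		fprintf(stderr, "Backtrace requested while in backtrace!\n");
		return false;	/* refuse to recurse */
	}
	in_backtrace = true;
	return true;
}

static void end_backtrace(void) { in_backtrace = false; }

int main(void)
{
	if (start_backtrace()) {
		printf("outer backtrace runs; nested attempt: %d\n",
		       start_backtrace());	/* prints 0 */
		end_backtrace();
	}
	return 0;
}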
-
-/*
- * This method wraps the backtracer's more generic support.
- * It is only invoked from the architecture-specific code; show_stack()
- * and dump_stack() are architecture-independent entry points.
- */
-void tile_show_stack(struct KBacktraceIterator *kbt)
-{
- int i;
- int have_mmap_sem = 0;
-
- if (!start_backtrace())
- return;
- kbt->verbose = 1;
- i = 0;
- for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
- char namebuf[KSYM_NAME_LEN+100];
- unsigned long address = kbt->it.pc;
-
- /*
- * Try to acquire the mmap_sem as we pass into userspace.
- * If we're in an interrupt context, don't even try, since
- * it's not safe to call e.g. d_path() from an interrupt,
- * as it uses spin locks without disabling interrupts.
- * Note we test "kbt->task == current", not "kbt->is_current",
- * since we're checking that "current" will work in d_path().
- */
- if (kbt->task == current && address < PAGE_OFFSET &&
- !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
- have_mmap_sem =
- down_read_trylock(&kbt->task->mm->mmap_sem);
- }
-
- describe_addr(kbt, address, have_mmap_sem,
- namebuf, sizeof(namebuf));
-
- pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n",
- i++, address, namebuf, (unsigned long)(kbt->it.sp));
-
- if (i >= 100) {
- pr_err("Stack dump truncated (%d frames)\n", i);
- break;
- }
- }
- if (kbt->end == KBT_LOOP)
- pr_err("Stack dump stopped; next frame identical to this one\n");
- if (have_mmap_sem)
- up_read(&kbt->task->mm->mmap_sem);
- end_backtrace();
-}
-EXPORT_SYMBOL(tile_show_stack);
-
-static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
- ulong pc, ulong lr, ulong sp, ulong r52)
-{
- memset(regs, 0, sizeof(struct pt_regs));
- regs->pc = pc;
- regs->lr = lr;
- regs->sp = sp;
- regs->regs[52] = r52;
- return regs;
-}
-
-/* Deprecated function currently only used by kernel_double_fault(). */
-void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
-{
- struct KBacktraceIterator kbt;
- struct pt_regs regs;
-
- regs_to_pt_regs(&regs, pc, lr, sp, r52);
- KBacktraceIterator_init(&kbt, NULL, &regs);
- tile_show_stack(&kbt);
-}
-
-/* This is called from KBacktraceIterator_init_current() */
-void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
- ulong lr, ulong sp, ulong r52)
-{
- struct pt_regs regs;
- KBacktraceIterator_init(kbt, NULL,
- regs_to_pt_regs(&regs, pc, lr, sp, r52));
-}
-
-/*
- * Called from sched_show_task() with task != NULL, or dump_stack()
- * with task == NULL. The esp argument is always NULL.
- */
-void show_stack(struct task_struct *task, unsigned long *esp)
-{
- struct KBacktraceIterator kbt;
- if (task == NULL || task == current) {
- KBacktraceIterator_init_current(&kbt);
- KBacktraceIterator_next(&kbt); /* don't show first frame */
- } else {
- KBacktraceIterator_init(&kbt, task, NULL);
- }
- tile_show_stack(&kbt);
-}
-
-#ifdef CONFIG_STACKTRACE
-
-/* Support generic Linux stack API too */
-
-static void save_stack_trace_common(struct task_struct *task,
- struct pt_regs *regs,
- bool user,
- struct stack_trace *trace)
-{
- struct KBacktraceIterator kbt;
- int skip = trace->skip;
- int i = 0;
-
- if (!start_backtrace())
- goto done;
- if (regs != NULL) {
- KBacktraceIterator_init(&kbt, NULL, regs);
- } else if (task == NULL || task == current) {
- KBacktraceIterator_init_current(&kbt);
- skip++; /* don't show KBacktraceIterator_init_current */
- } else {
- KBacktraceIterator_init(&kbt, task, NULL);
- }
- for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
- if (skip) {
- --skip;
- continue;
- }
- if (i >= trace->max_entries ||
- (!user && kbt.it.pc < PAGE_OFFSET))
- break;
- trace->entries[i++] = kbt.it.pc;
- }
- end_backtrace();
-done:
- if (i < trace->max_entries)
- trace->entries[i++] = ULONG_MAX;
- trace->nr_entries = i;
-}
-
-void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
-{
- save_stack_trace_common(task, NULL, false, trace);
-}
-EXPORT_SYMBOL(save_stack_trace_tsk);
-
-void save_stack_trace(struct stack_trace *trace)
-{
- save_stack_trace_common(NULL, NULL, false, trace);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
- save_stack_trace_common(NULL, regs, false, trace);
-}
-
-void save_stack_trace_user(struct stack_trace *trace)
-{
- /* Trace user stack if we are not a kernel thread. */
- if (current->mm)
- save_stack_trace_common(NULL, task_pt_regs(current),
- true, trace);
- else if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
-#endif
-
-/* In entry.S */
-EXPORT_SYMBOL(KBacktraceIterator_init_current);
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
deleted file mode 100644
index c7418dcbbb08..000000000000
--- a/arch/tile/kernel/sys.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * This file contains various random system calls that
- * have a non-standard calling sequence on the Linux/TILE
- * platform.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/syscalls.h>
-#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/mempolicy.h>
-#include <linux/binfmts.h>
-#include <linux/fs.h>
-#include <linux/compat.h>
-#include <linux/uaccess.h>
-#include <linux/signal.h>
-#include <asm/syscalls.h>
-#include <asm/pgtable.h>
-#include <asm/homecache.h>
-#include <asm/cachectl.h>
-#include <asm/byteorder.h>
-#include <arch/chip.h>
-
-SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
- unsigned long, flags)
-{
- /* DCACHE is not particularly effective if not bound to one cpu. */
- if (flags & DCACHE)
- homecache_evict(cpumask_of(raw_smp_processor_id()));
-
- if (flags & ICACHE)
- flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
- 0, 0, 0, NULL, NULL, 0);
- return 0;
-}
-
-/*
- * Syscalls that pass 64-bit values on 32-bit systems normally
- * pass them as a (low, high) word pair packed into adjacent
- * registers. If the low word naturally falls on an even register,
- * our ABI makes it work correctly; if not, we adjust it here.
- * Handling it here means we don't have to fix uclibc AND glibc AND
- * any other standard libcs we want to support.
- */
-
-#if !defined(__tilegx__) || defined(CONFIG_COMPAT)
-
-#ifdef __BIG_ENDIAN
-#define SYSCALL_PAIR(name) u32 name ## _hi, u32 name ## _lo
-#else
-#define SYSCALL_PAIR(name) u32 name ## _lo, u32 name ## _hi
-#endif
-
-ssize_t sys32_readahead(int fd, SYSCALL_PAIR(offset), u32 count)
-{
- return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count);
-}
-
-int sys32_fadvise64_64(int fd, SYSCALL_PAIR(offset),
- SYSCALL_PAIR(len), int advice)
-{
- return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo,
- ((loff_t)len_hi << 32) | len_lo, advice);
-}
-
-#endif /* 32-bit syscall wrappers */
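
A standalone sketch of the reassembly these wrappers perform. pair_to_loff() is an invented helper; on a big-endian ABI the two register arguments simply arrive in the opposite order, which is all the SYSCALL_PAIR macro encodes:

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 64-bit value from the two 32-bit halves passed in
 * adjacent registers, exactly as the wrappers above do.
 */
static int64_t pair_to_loff(uint32_t lo, uint32_t hi)
{
	return ((int64_t)hi << 32) | lo;
}

int main(void)
{
	/* 0x00000001_00000400 = 4 GiB + 1 KiB */
	printf("%lld\n", (long long)pair_to_loff(0x400, 0x1));
	return 0;
}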
-
-/* Note: used by the compat code even in 64-bit Linux. */
-SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
- unsigned long, prot, unsigned long, flags,
- unsigned long, fd, unsigned long, off_4k)
-{
-#define PAGE_ADJUST (PAGE_SHIFT - 12)
- if (off_4k & ((1 << PAGE_ADJUST) - 1))
- return -EINVAL;
- return sys_mmap_pgoff(addr, len, prot, flags, fd,
- off_4k >> PAGE_ADJUST);
-}
-
-#ifdef __tilegx__
-SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
- unsigned long, prot, unsigned long, flags,
- unsigned long, fd, off_t, offset)
-{
- if (offset & ((1 << PAGE_SHIFT) - 1))
- return -EINVAL;
- return sys_mmap_pgoff(addr, len, prot, flags, fd,
- offset >> PAGE_SHIFT);
-}
-#endif
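
A standalone model of the off_4k convention used by mmap2 above: the offset argument is in fixed 4KB units regardless of the kernel's page size, so it must be checked for page alignment and rescaled. A PAGE_SHIFT of 16 (64KB pages) is assumed here just to make the adjustment visible:

#include <stdio.h>

#define PAGE_SHIFT 16
#define PAGE_ADJUST (PAGE_SHIFT - 12)

/* Convert an offset in 4KB units to an offset in page units,
 * rejecting values that are not page aligned.
 */
static long off_4k_to_pgoff(unsigned long off_4k)
{
	if (off_4k & ((1UL << PAGE_ADJUST) - 1))
		return -1;	/* -EINVAL in the kernel */
	return off_4k >> PAGE_ADJUST;
}

int main(void)
{
	printf("%ld\n", off_4k_to_pgoff(32));	/* 128KB -> page 2 */
	printf("%ld\n", off_4k_to_pgoff(3));	/* not 64KB aligned -> -1 */
	return 0;
}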
-
-
-/* Provide the actual syscall number to call mapping. */
-#undef __SYSCALL
-#define __SYSCALL(nr, call) [nr] = (call),
-
-#ifndef __tilegx__
-/* See comments at the top of the file. */
-#define sys_fadvise64_64 sys32_fadvise64_64
-#define sys_readahead sys32_readahead
-#endif
-
-/* Call the assembly trampolines where necessary. */
-#undef sys_rt_sigreturn
-#define sys_rt_sigreturn _sys_rt_sigreturn
-#define sys_clone _sys_clone
-
-/*
- * Note that we can't include <linux/unistd.h> here since the header
- * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
- */
-void *sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls-1] = sys_ni_syscall,
-#include <asm/unistd.h>
-};
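
The [0 ... __NR_syscalls-1] initializer above is a GCC range-designator extension: it first fills every slot with sys_ni_syscall, and the per-syscall entries pulled in from <asm/unistd.h> then override individual slots. A minimal standalone model (ni_syscall and do_read are illustrative):

#include <stdio.h>

#define NR_CALLS 8

static int ni_syscall(void) { return -38; /* -ENOSYS */ }
static int do_read(void)    { return 0; }

static int (*const call_table[NR_CALLS])(void) = {
	[0 ... NR_CALLS - 1] = ni_syscall,	/* default for every slot */
	[3] = do_read,				/* override one entry */
};

int main(void)
{
	printf("slot 3 -> %d, slot 5 -> %d\n",
	       call_table[3](), call_table[5]());
	return 0;
}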
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
deleted file mode 100644
index b09456a3d77a..000000000000
--- a/arch/tile/kernel/sysfs.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * /sys entry support.
- */
-
-#include <linux/device.h>
-#include <linux/cpu.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/stat.h>
-#include <hv/hypervisor.h>
-
-/* Return a string queried from the hypervisor, truncated to page size. */
-static ssize_t get_hv_confstr(char *page, int query)
-{
- ssize_t n = hv_confstr(query, (unsigned long)page, PAGE_SIZE - 1);
- n = n < 0 ? 0 : min(n, (ssize_t)PAGE_SIZE - 1) - 1;
- if (n)
- page[n++] = '\n';
- page[n] = '\0';
- return n;
-}
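
A standalone model of get_hv_confstr()'s truncate-and-terminate logic. fake_confstr() is an invented stand-in for hv_confstr() that likewise returns the string length including the trailing NUL:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define PAGE_SIZE 16	/* tiny, to make truncation easy to see */

static ssize_t fake_confstr(char *buf, size_t len, const char *val)
{
	snprintf(buf, len, "%s", val);
	return (ssize_t)strlen(val) + 1;	/* length including the NUL */
}

static ssize_t get_confstr(char *page, const char *val)
{
	ssize_t n = fake_confstr(page, PAGE_SIZE - 1, val);

	/* Clamp to the page, drop the NUL from the count, then append
	 * a newline and re-terminate, as the kernel function does.
	 */
	n = n < 0 ? 0 : (n < PAGE_SIZE - 1 ? n : PAGE_SIZE - 1) - 1;
	if (n)
		page[n++] = '\n';
	page[n] = '\0';
	return n;
}

int main(void)
{
	char page[PAGE_SIZE];

	printf("%zd bytes: %s", get_confstr(page, "rev-2"), page);
	return 0;
}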
-
-static ssize_t chip_width_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- return sprintf(page, "%u\n", smp_width);
-}
-static DEVICE_ATTR_RO(chip_width);
-
-static ssize_t chip_height_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- return sprintf(page, "%u\n", smp_height);
-}
-static DEVICE_ATTR_RO(chip_height);
-
-static ssize_t chip_serial_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- return get_hv_confstr(page, HV_CONFSTR_CHIP_SERIAL_NUM);
-}
-static DEVICE_ATTR_RO(chip_serial);
-
-static ssize_t chip_revision_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- return get_hv_confstr(page, HV_CONFSTR_CHIP_REV);
-}
-static DEVICE_ATTR_RO(chip_revision);
-
-
-static ssize_t type_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- return sprintf(page, "tilera\n");
-}
-static DEVICE_ATTR_RO(type);
-
-#define HV_CONF_ATTR(name, conf) \
- static ssize_t name ## _show(struct device *dev, \
- struct device_attribute *attr, \
- char *page) \
- { \
- return get_hv_confstr(page, conf); \
- } \
- static DEVICE_ATTR(name, 0444, name ## _show, NULL);
-
-HV_CONF_ATTR(version, HV_CONFSTR_HV_SW_VER)
-HV_CONF_ATTR(config_version, HV_CONFSTR_HV_CONFIG_VER)
-
-HV_CONF_ATTR(board_part, HV_CONFSTR_BOARD_PART_NUM)
-HV_CONF_ATTR(board_serial, HV_CONFSTR_BOARD_SERIAL_NUM)
-HV_CONF_ATTR(board_revision, HV_CONFSTR_BOARD_REV)
-HV_CONF_ATTR(board_description, HV_CONFSTR_BOARD_DESC)
-HV_CONF_ATTR(mezz_part, HV_CONFSTR_MEZZ_PART_NUM)
-HV_CONF_ATTR(mezz_serial, HV_CONFSTR_MEZZ_SERIAL_NUM)
-HV_CONF_ATTR(mezz_revision, HV_CONFSTR_MEZZ_REV)
-HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC)
-HV_CONF_ATTR(cpumod_part, HV_CONFSTR_CPUMOD_PART_NUM)
-HV_CONF_ATTR(cpumod_serial, HV_CONFSTR_CPUMOD_SERIAL_NUM)
-HV_CONF_ATTR(cpumod_revision, HV_CONFSTR_CPUMOD_REV)
-HV_CONF_ATTR(cpumod_description, HV_CONFSTR_CPUMOD_DESC)
-HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL)
-
-static struct attribute *board_attrs[] = {
- &dev_attr_board_part.attr,
- &dev_attr_board_serial.attr,
- &dev_attr_board_revision.attr,
- &dev_attr_board_description.attr,
- &dev_attr_mezz_part.attr,
- &dev_attr_mezz_serial.attr,
- &dev_attr_mezz_revision.attr,
- &dev_attr_mezz_description.attr,
- &dev_attr_cpumod_part.attr,
- &dev_attr_cpumod_serial.attr,
- &dev_attr_cpumod_revision.attr,
- &dev_attr_cpumod_description.attr,
- &dev_attr_switch_control.attr,
- NULL
-};
-
-static struct attribute_group board_attr_group = {
- .name = "board",
- .attrs = board_attrs,
-};
-
-
-static struct bin_attribute hvconfig_bin;
-
-static ssize_t
-hvconfig_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- static size_t size;
-
- /* Lazily learn the true size (minus the trailing NUL). */
- if (size == 0)
- size = hv_confstr(HV_CONFSTR_HV_CONFIG, 0, 0) - 1;
-
- /* Check and adjust input parameters. */
- if (off > size)
- return -EINVAL;
- if (count > size - off)
- count = size - off;
-
- if (count) {
- /* Get a copy of the hvc and copy out the relevant portion. */
- char *hvc;
-
- size = off + count;
- hvc = kmalloc(size, GFP_KERNEL);
- if (hvc == NULL)
- return -ENOMEM;
- hv_confstr(HV_CONFSTR_HV_CONFIG, (unsigned long)hvc, size);
- memcpy(buf, hvc + off, count);
- kfree(hvc);
- }
-
- return count;
-}
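
A standalone model of the offset/count clamping a binary sysfs read handler must do: the caller may start at any offset and ask for any count, so the handler clamps the request to the remaining bytes and returns how many it actually produced (the config string here is invented):

#include <stdio.h>
#include <string.h>

static const char config[] = "cpus=64\nfreq=1200\n";

static long bin_read(char *buf, long off, long count)
{
	long size = (long)sizeof(config) - 1;	/* minus trailing NUL */

	if (off > size)
		return -1;	/* -EINVAL in the kernel */
	if (count > size - off)
		count = size - off;	/* clamp to what remains */
	memcpy(buf, config + off, count);
	return count;
}

int main(void)
{
	char buf[64];
	long n = bin_read(buf, 8, 64);	/* ask for more than remains */

	printf("%ld bytes: %.*s", n, (int)n, buf);
	return 0;
}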
-
-static ssize_t hv_stats_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- int cpu = dev->id;
- long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
-
- ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS,
- (unsigned long)page, PAGE_SIZE - 1,
- lotar, 0);
- n = n < 0 ? 0 : min(n, (ssize_t)PAGE_SIZE - 1);
- page[n] = '\0';
- return n;
-}
-
-static ssize_t hv_stats_store(struct device *dev,
- struct device_attribute *attr,
- const char *page,
- size_t count)
-{
- int cpu = dev->id;
- long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
-
- ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS, 0, 0, lotar, 1);
- return n < 0 ? n : count;
-}
-
-static DEVICE_ATTR_RW(hv_stats);
-
-static int hv_stats_device_add(struct device *dev, struct subsys_interface *sif)
-{
- int err, cpu = dev->id;
-
- if (!cpu_online(cpu))
- return 0;
-
- err = sysfs_create_file(&dev->kobj, &dev_attr_hv_stats.attr);
-
- return err;
-}
-
-static void hv_stats_device_remove(struct device *dev,
- struct subsys_interface *sif)
-{
- int cpu = dev->id;
-
- if (cpu_online(cpu))
- sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr);
-}
-
-
-static struct subsys_interface hv_stats_interface = {
- .name = "hv_stats",
- .subsys = &cpu_subsys,
- .add_dev = hv_stats_device_add,
- .remove_dev = hv_stats_device_remove,
-};
-
-static int __init create_sysfs_entries(void)
-{
- int err = 0;
-
-#define create_cpu_attr(name) \
- if (!err) \
- err = device_create_file(cpu_subsys.dev_root, &dev_attr_##name);
- create_cpu_attr(chip_width);
- create_cpu_attr(chip_height);
- create_cpu_attr(chip_serial);
- create_cpu_attr(chip_revision);
-
-#define create_hv_attr(name) \
- if (!err) \
- err = sysfs_create_file(hypervisor_kobj, &dev_attr_##name.attr);
- create_hv_attr(type);
- create_hv_attr(version);
- create_hv_attr(config_version);
-
- if (!err)
- err = sysfs_create_group(hypervisor_kobj, &board_attr_group);
-
- if (!err) {
- sysfs_bin_attr_init(&hvconfig_bin);
- hvconfig_bin.attr.name = "hvconfig";
- hvconfig_bin.attr.mode = S_IRUGO;
- hvconfig_bin.read = hvconfig_bin_read;
- hvconfig_bin.size = PAGE_SIZE;
- err = sysfs_create_bin_file(hypervisor_kobj, &hvconfig_bin);
- }
-
- if (!err) {
- /*
- * Don't bother adding the hv_stats files on each CPU if
- * our hypervisor doesn't supply statistics.
- */
- int cpu = raw_smp_processor_id();
- long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
- char dummy;
- ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS,
- (unsigned long) &dummy, 1,
- lotar, 0);
- if (n >= 0)
- err = subsys_interface_register(&hv_stats_interface);
- }
-
- return err;
-}
-subsys_initcall(create_sysfs_entries);
diff --git a/arch/tile/kernel/tile-desc_32.c b/arch/tile/kernel/tile-desc_32.c
deleted file mode 100644
index dd7bd1d8563c..000000000000
--- a/arch/tile/kernel/tile-desc_32.c
+++ /dev/null
@@ -1,2605 +0,0 @@
-/* TILEPro opcode information.
- *
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- */
-
-/* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */
-#define BFD_RELOC(x) -1
-
-/* Special registers. */
-#define TREG_LR 55
-#define TREG_SN 56
-#define TREG_ZERO 63
-
-#include <linux/stddef.h>
-#include <asm/tile-desc.h>
-
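
Tables like the one below are keyed by mnemonic and opcode enum, and a consumer typically scans or searches them. A minimal standalone model of such a lookup, with an invented, much-simplified record type (the real struct tilepro_opcode additionally carries pipe masks, an implicit register, and per-pipe operand lists, as the entries below show):

#include <stdio.h>
#include <string.h>

struct opcode {
	const char *name;	/* mnemonic */
	int num_operands;
};

static const struct opcode opcodes[] = {
	{ "add",  3 },
	{ "move", 2 },
	{ "jal",  1 },
};

static const struct opcode *find_opcode(const char *name)
{
	for (size_t i = 0; i < sizeof(opcodes) / sizeof(opcodes[0]); i++)
		if (strcmp(opcodes[i].name, name) == 0)
			return &opcodes[i];
	return NULL;
}

int main(void)
{
	const struct opcode *op = find_opcode("move");

	if (op)
		printf("%s takes %d operands\n", op->name, op->num_operands);
	return 0;
}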
-const struct tilepro_opcode tilepro_opcodes[395] =
-{
- { "bpt", TILEPRO_OPC_BPT, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "info", TILEPRO_OPC_INFO, 0xf, 1, TREG_ZERO, 1,
- { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } },
- },
- { "infol", TILEPRO_OPC_INFOL, 0x3, 1, TREG_ZERO, 1,
- { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "j", TILEPRO_OPC_J, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } },
- },
- { "jal", TILEPRO_OPC_JAL, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } },
- },
- { "move", TILEPRO_OPC_MOVE, 0xf, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 9, 10 }, { 11, 12 }, { 13, 14 }, { 0, } },
- },
- { "move.sn", TILEPRO_OPC_MOVE_SN, 0x3, 2, TREG_SN, 1,
- { { 7, 8 }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "movei", TILEPRO_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1,
- { { 7, 0 }, { 9, 1 }, { 11, 2 }, { 13, 3 }, { 0, } },
- },
- { "movei.sn", TILEPRO_OPC_MOVEI_SN, 0x3, 2, TREG_SN, 1,
- { { 7, 0 }, { 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "moveli", TILEPRO_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1,
- { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "moveli.sn", TILEPRO_OPC_MOVELI_SN, 0x3, 2, TREG_SN, 1,
- { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "movelis", TILEPRO_OPC_MOVELIS, 0x3, 2, TREG_SN, 1,
- { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch", TILEPRO_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 15 } },
- },
- { "raise", TILEPRO_OPC_RAISE, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "add", TILEPRO_OPC_ADD, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "add.sn", TILEPRO_OPC_ADD_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addb", TILEPRO_OPC_ADDB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addb.sn", TILEPRO_OPC_ADDB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addbs_u", TILEPRO_OPC_ADDBS_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addbs_u.sn", TILEPRO_OPC_ADDBS_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addh", TILEPRO_OPC_ADDH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addh.sn", TILEPRO_OPC_ADDH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addhs", TILEPRO_OPC_ADDHS, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addhs.sn", TILEPRO_OPC_ADDHS_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "addi", TILEPRO_OPC_ADDI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "addi.sn", TILEPRO_OPC_ADDI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "addib", TILEPRO_OPC_ADDIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "addib.sn", TILEPRO_OPC_ADDIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "addih", TILEPRO_OPC_ADDIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "addih.sn", TILEPRO_OPC_ADDIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "addli", TILEPRO_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "addli.sn", TILEPRO_OPC_ADDLI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "addlis", TILEPRO_OPC_ADDLIS, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "adds", TILEPRO_OPC_ADDS, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "adds.sn", TILEPRO_OPC_ADDS_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "adiffb_u", TILEPRO_OPC_ADIFFB_U, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "adiffb_u.sn", TILEPRO_OPC_ADIFFB_U_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "adiffh", TILEPRO_OPC_ADIFFH, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "adiffh.sn", TILEPRO_OPC_ADIFFH_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "and", TILEPRO_OPC_AND, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "and.sn", TILEPRO_OPC_AND_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "andi", TILEPRO_OPC_ANDI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "andi.sn", TILEPRO_OPC_ANDI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "auli", TILEPRO_OPC_AULI, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "avgb_u", TILEPRO_OPC_AVGB_U, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "avgb_u.sn", TILEPRO_OPC_AVGB_U_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "avgh", TILEPRO_OPC_AVGH, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "avgh.sn", TILEPRO_OPC_AVGH_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "bbns", TILEPRO_OPC_BBNS, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbns.sn", TILEPRO_OPC_BBNS_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbnst", TILEPRO_OPC_BBNST, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbnst.sn", TILEPRO_OPC_BBNST_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbs", TILEPRO_OPC_BBS, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbs.sn", TILEPRO_OPC_BBS_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbst", TILEPRO_OPC_BBST, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bbst.sn", TILEPRO_OPC_BBST_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgez", TILEPRO_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgez.sn", TILEPRO_OPC_BGEZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgezt", TILEPRO_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgezt.sn", TILEPRO_OPC_BGEZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgz", TILEPRO_OPC_BGZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgz.sn", TILEPRO_OPC_BGZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgzt", TILEPRO_OPC_BGZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgzt.sn", TILEPRO_OPC_BGZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bitx", TILEPRO_OPC_BITX, 0x5, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } },
- },
- { "bitx.sn", TILEPRO_OPC_BITX_SN, 0x1, 2, TREG_SN, 1,
- { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "blez", TILEPRO_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blez.sn", TILEPRO_OPC_BLEZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blezt", TILEPRO_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blezt.sn", TILEPRO_OPC_BLEZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blz", TILEPRO_OPC_BLZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blz.sn", TILEPRO_OPC_BLZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blzt", TILEPRO_OPC_BLZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blzt.sn", TILEPRO_OPC_BLZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnz", TILEPRO_OPC_BNZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnz.sn", TILEPRO_OPC_BNZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnzt", TILEPRO_OPC_BNZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnzt.sn", TILEPRO_OPC_BNZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bytex", TILEPRO_OPC_BYTEX, 0x5, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } },
- },
- { "bytex.sn", TILEPRO_OPC_BYTEX_SN, 0x1, 2, TREG_SN, 1,
- { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "bz", TILEPRO_OPC_BZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bz.sn", TILEPRO_OPC_BZ_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bzt", TILEPRO_OPC_BZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bzt.sn", TILEPRO_OPC_BZT_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "clz", TILEPRO_OPC_CLZ, 0x5, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } },
- },
- { "clz.sn", TILEPRO_OPC_CLZ_SN, 0x1, 2, TREG_SN, 1,
- { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_32", TILEPRO_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_32.sn", TILEPRO_OPC_CRC32_32_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_8", TILEPRO_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_8.sn", TILEPRO_OPC_CRC32_8_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "ctz", TILEPRO_OPC_CTZ, 0x5, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } },
- },
- { "ctz.sn", TILEPRO_OPC_CTZ_SN, 0x1, 2, TREG_SN, 1,
- { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "drain", TILEPRO_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "dtlbpr", TILEPRO_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "dword_align", TILEPRO_OPC_DWORD_ALIGN, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "dword_align.sn", TILEPRO_OPC_DWORD_ALIGN_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "finv", TILEPRO_OPC_FINV, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "flush", TILEPRO_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "fnop", TILEPRO_OPC_FNOP, 0xf, 0, TREG_ZERO, 1,
- { { }, { }, { }, { }, { 0, } },
- },
- { "icoh", TILEPRO_OPC_ICOH, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "ill", TILEPRO_OPC_ILL, 0xa, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { }, { 0, } },
- },
- { "inthb", TILEPRO_OPC_INTHB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "inthb.sn", TILEPRO_OPC_INTHB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "inthh", TILEPRO_OPC_INTHH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "inthh.sn", TILEPRO_OPC_INTHH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "intlb", TILEPRO_OPC_INTLB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "intlb.sn", TILEPRO_OPC_INTLB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "intlh", TILEPRO_OPC_INTLH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "intlh.sn", TILEPRO_OPC_INTLH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "inv", TILEPRO_OPC_INV, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "iret", TILEPRO_OPC_IRET, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "jalb", TILEPRO_OPC_JALB, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } },
- },
- { "jalf", TILEPRO_OPC_JALF, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } },
- },
- { "jalr", TILEPRO_OPC_JALR, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "jalrp", TILEPRO_OPC_JALRP, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "jb", TILEPRO_OPC_JB, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } },
- },
- { "jf", TILEPRO_OPC_JF, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } },
- },
- { "jr", TILEPRO_OPC_JR, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "jrp", TILEPRO_OPC_JRP, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lb", TILEPRO_OPC_LB, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } },
- },
- { "lb.sn", TILEPRO_OPC_LB_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lb_u", TILEPRO_OPC_LB_U, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } },
- },
- { "lb_u.sn", TILEPRO_OPC_LB_U_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lbadd", TILEPRO_OPC_LBADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lbadd.sn", TILEPRO_OPC_LBADD_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lbadd_u", TILEPRO_OPC_LBADD_U, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lbadd_u.sn", TILEPRO_OPC_LBADD_U_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lh", TILEPRO_OPC_LH, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } },
- },
- { "lh.sn", TILEPRO_OPC_LH_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lh_u", TILEPRO_OPC_LH_U, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } },
- },
- { "lh_u.sn", TILEPRO_OPC_LH_U_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lhadd", TILEPRO_OPC_LHADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lhadd.sn", TILEPRO_OPC_LHADD_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lhadd_u", TILEPRO_OPC_LHADD_U, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lhadd_u.sn", TILEPRO_OPC_LHADD_U_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lnk", TILEPRO_OPC_LNK, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "lnk.sn", TILEPRO_OPC_LNK_SN, 0x2, 1, TREG_SN, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "lw", TILEPRO_OPC_LW, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } },
- },
- { "lw.sn", TILEPRO_OPC_LW_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lw_na", TILEPRO_OPC_LW_NA, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lw_na.sn", TILEPRO_OPC_LW_NA_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "lwadd", TILEPRO_OPC_LWADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lwadd.sn", TILEPRO_OPC_LWADD_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lwadd_na", TILEPRO_OPC_LWADD_NA, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lwadd_na.sn", TILEPRO_OPC_LWADD_NA_SN, 0x2, 3, TREG_SN, 1,
- { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxb_u", TILEPRO_OPC_MAXB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxb_u.sn", TILEPRO_OPC_MAXB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxh", TILEPRO_OPC_MAXH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxh.sn", TILEPRO_OPC_MAXH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxib_u", TILEPRO_OPC_MAXIB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxib_u.sn", TILEPRO_OPC_MAXIB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxih", TILEPRO_OPC_MAXIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "maxih.sn", TILEPRO_OPC_MAXIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "mf", TILEPRO_OPC_MF, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "mfspr", TILEPRO_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 25 }, { 0, }, { 0, }, { 0, } },
- },
- { "minb_u", TILEPRO_OPC_MINB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "minb_u.sn", TILEPRO_OPC_MINB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "minh", TILEPRO_OPC_MINH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "minh.sn", TILEPRO_OPC_MINH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "minib_u", TILEPRO_OPC_MINIB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "minib_u.sn", TILEPRO_OPC_MINIB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "minih", TILEPRO_OPC_MINIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "minih.sn", TILEPRO_OPC_MINIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "mm", TILEPRO_OPC_MM, 0x3, 5, TREG_ZERO, 1,
- { { 7, 8, 16, 26, 27 }, { 9, 10, 17, 28, 29 }, { 0, }, { 0, }, { 0, } },
- },
- { "mnz", TILEPRO_OPC_MNZ, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "mnz.sn", TILEPRO_OPC_MNZ_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mnzb", TILEPRO_OPC_MNZB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mnzb.sn", TILEPRO_OPC_MNZB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mnzh", TILEPRO_OPC_MNZH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mnzh.sn", TILEPRO_OPC_MNZH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mtspr", TILEPRO_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 30, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhh_ss", TILEPRO_OPC_MULHH_SS, 0x5, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulhh_ss.sn", TILEPRO_OPC_MULHH_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhh_su", TILEPRO_OPC_MULHH_SU, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhh_su.sn", TILEPRO_OPC_MULHH_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhh_uu", TILEPRO_OPC_MULHH_UU, 0x5, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulhh_uu.sn", TILEPRO_OPC_MULHH_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhha_ss", TILEPRO_OPC_MULHHA_SS, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulhha_ss.sn", TILEPRO_OPC_MULHHA_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhha_su", TILEPRO_OPC_MULHHA_SU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhha_su.sn", TILEPRO_OPC_MULHHA_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhha_uu", TILEPRO_OPC_MULHHA_UU, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulhha_uu.sn", TILEPRO_OPC_MULHHA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhhsa_uu", TILEPRO_OPC_MULHHSA_UU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhhsa_uu.sn", TILEPRO_OPC_MULHHSA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_ss", TILEPRO_OPC_MULHL_SS, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_ss.sn", TILEPRO_OPC_MULHL_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_su", TILEPRO_OPC_MULHL_SU, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_su.sn", TILEPRO_OPC_MULHL_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_us", TILEPRO_OPC_MULHL_US, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_us.sn", TILEPRO_OPC_MULHL_US_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_uu", TILEPRO_OPC_MULHL_UU, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhl_uu.sn", TILEPRO_OPC_MULHL_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_ss", TILEPRO_OPC_MULHLA_SS, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_ss.sn", TILEPRO_OPC_MULHLA_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_su", TILEPRO_OPC_MULHLA_SU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_su.sn", TILEPRO_OPC_MULHLA_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_us", TILEPRO_OPC_MULHLA_US, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_us.sn", TILEPRO_OPC_MULHLA_US_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_uu", TILEPRO_OPC_MULHLA_UU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhla_uu.sn", TILEPRO_OPC_MULHLA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulhlsa_uu", TILEPRO_OPC_MULHLSA_UU, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulhlsa_uu.sn", TILEPRO_OPC_MULHLSA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulll_ss", TILEPRO_OPC_MULLL_SS, 0x5, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulll_ss.sn", TILEPRO_OPC_MULLL_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulll_su", TILEPRO_OPC_MULLL_SU, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulll_su.sn", TILEPRO_OPC_MULLL_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulll_uu", TILEPRO_OPC_MULLL_UU, 0x5, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } },
- },
- { "mulll_uu.sn", TILEPRO_OPC_MULLL_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mullla_ss", TILEPRO_OPC_MULLLA_SS, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mullla_ss.sn", TILEPRO_OPC_MULLLA_SS_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mullla_su", TILEPRO_OPC_MULLLA_SU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mullla_su.sn", TILEPRO_OPC_MULLLA_SU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mullla_uu", TILEPRO_OPC_MULLLA_UU, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mullla_uu.sn", TILEPRO_OPC_MULLLA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulllsa_uu", TILEPRO_OPC_MULLLSA_UU, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mulllsa_uu.sn", TILEPRO_OPC_MULLLSA_UU_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mvnz", TILEPRO_OPC_MVNZ, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mvnz.sn", TILEPRO_OPC_MVNZ_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mvz", TILEPRO_OPC_MVZ, 0x5, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } },
- },
- { "mvz.sn", TILEPRO_OPC_MVZ_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mz", TILEPRO_OPC_MZ, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "mz.sn", TILEPRO_OPC_MZ_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mzb", TILEPRO_OPC_MZB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mzb.sn", TILEPRO_OPC_MZB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mzh", TILEPRO_OPC_MZH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "mzh.sn", TILEPRO_OPC_MZH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "nap", TILEPRO_OPC_NAP, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "nop", TILEPRO_OPC_NOP, 0xf, 0, TREG_ZERO, 1,
- { { }, { }, { }, { }, { 0, } },
- },
- { "nor", TILEPRO_OPC_NOR, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "nor.sn", TILEPRO_OPC_NOR_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "or", TILEPRO_OPC_OR, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "or.sn", TILEPRO_OPC_OR_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "ori", TILEPRO_OPC_ORI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "ori.sn", TILEPRO_OPC_ORI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "packbs_u", TILEPRO_OPC_PACKBS_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packbs_u.sn", TILEPRO_OPC_PACKBS_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packhb", TILEPRO_OPC_PACKHB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packhb.sn", TILEPRO_OPC_PACKHB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packhs", TILEPRO_OPC_PACKHS, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packhs.sn", TILEPRO_OPC_PACKHS_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packlb", TILEPRO_OPC_PACKLB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "packlb.sn", TILEPRO_OPC_PACKLB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "pcnt", TILEPRO_OPC_PCNT, 0x5, 2, TREG_ZERO, 1,
- { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } },
- },
- { "pcnt.sn", TILEPRO_OPC_PCNT_SN, 0x1, 2, TREG_SN, 1,
- { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "rl", TILEPRO_OPC_RL, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "rl.sn", TILEPRO_OPC_RL_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "rli", TILEPRO_OPC_RLI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } },
- },
- { "rli.sn", TILEPRO_OPC_RLI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "s1a", TILEPRO_OPC_S1A, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "s1a.sn", TILEPRO_OPC_S1A_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "s2a", TILEPRO_OPC_S2A, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "s2a.sn", TILEPRO_OPC_S2A_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "s3a", TILEPRO_OPC_S3A, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "s3a.sn", TILEPRO_OPC_S3A_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sadab_u", TILEPRO_OPC_SADAB_U, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadab_u.sn", TILEPRO_OPC_SADAB_U_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadah", TILEPRO_OPC_SADAH, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadah.sn", TILEPRO_OPC_SADAH_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadah_u", TILEPRO_OPC_SADAH_U, 0x1, 3, TREG_ZERO, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadah_u.sn", TILEPRO_OPC_SADAH_U_SN, 0x1, 3, TREG_SN, 1,
- { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadb_u", TILEPRO_OPC_SADB_U, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadb_u.sn", TILEPRO_OPC_SADB_U_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadh", TILEPRO_OPC_SADH, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadh.sn", TILEPRO_OPC_SADH_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadh_u", TILEPRO_OPC_SADH_U, 0x1, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sadh_u.sn", TILEPRO_OPC_SADH_U_SN, 0x1, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "sb", TILEPRO_OPC_SB, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } },
- },
- { "sbadd", TILEPRO_OPC_SBADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } },
- },
- { "seq", TILEPRO_OPC_SEQ, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "seq.sn", TILEPRO_OPC_SEQ_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqb", TILEPRO_OPC_SEQB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqb.sn", TILEPRO_OPC_SEQB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqh", TILEPRO_OPC_SEQH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqh.sn", TILEPRO_OPC_SEQH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqi", TILEPRO_OPC_SEQI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "seqi.sn", TILEPRO_OPC_SEQI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqib", TILEPRO_OPC_SEQIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqib.sn", TILEPRO_OPC_SEQIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqih", TILEPRO_OPC_SEQIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "seqih.sn", TILEPRO_OPC_SEQIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sh", TILEPRO_OPC_SH, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } },
- },
- { "shadd", TILEPRO_OPC_SHADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } },
- },
- { "shl", TILEPRO_OPC_SHL, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "shl.sn", TILEPRO_OPC_SHL_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlb", TILEPRO_OPC_SHLB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlb.sn", TILEPRO_OPC_SHLB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlh", TILEPRO_OPC_SHLH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlh.sn", TILEPRO_OPC_SHLH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shli", TILEPRO_OPC_SHLI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } },
- },
- { "shli.sn", TILEPRO_OPC_SHLI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlib", TILEPRO_OPC_SHLIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlib.sn", TILEPRO_OPC_SHLIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlih", TILEPRO_OPC_SHLIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlih.sn", TILEPRO_OPC_SHLIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shr", TILEPRO_OPC_SHR, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "shr.sn", TILEPRO_OPC_SHR_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrb", TILEPRO_OPC_SHRB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrb.sn", TILEPRO_OPC_SHRB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrh", TILEPRO_OPC_SHRH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrh.sn", TILEPRO_OPC_SHRH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shri", TILEPRO_OPC_SHRI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } },
- },
- { "shri.sn", TILEPRO_OPC_SHRI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrib", TILEPRO_OPC_SHRIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrib.sn", TILEPRO_OPC_SHRIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrih", TILEPRO_OPC_SHRIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrih.sn", TILEPRO_OPC_SHRIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "slt", TILEPRO_OPC_SLT, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "slt.sn", TILEPRO_OPC_SLT_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slt_u", TILEPRO_OPC_SLT_U, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "slt_u.sn", TILEPRO_OPC_SLT_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltb", TILEPRO_OPC_SLTB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltb.sn", TILEPRO_OPC_SLTB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltb_u", TILEPRO_OPC_SLTB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltb_u.sn", TILEPRO_OPC_SLTB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slte", TILEPRO_OPC_SLTE, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "slte.sn", TILEPRO_OPC_SLTE_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slte_u", TILEPRO_OPC_SLTE_U, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "slte_u.sn", TILEPRO_OPC_SLTE_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteb", TILEPRO_OPC_SLTEB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteb.sn", TILEPRO_OPC_SLTEB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteb_u", TILEPRO_OPC_SLTEB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteb_u.sn", TILEPRO_OPC_SLTEB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteh", TILEPRO_OPC_SLTEH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteh.sn", TILEPRO_OPC_SLTEH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteh_u", TILEPRO_OPC_SLTEH_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slteh_u.sn", TILEPRO_OPC_SLTEH_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slth", TILEPRO_OPC_SLTH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slth.sn", TILEPRO_OPC_SLTH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slth_u", TILEPRO_OPC_SLTH_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slth_u.sn", TILEPRO_OPC_SLTH_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "slti", TILEPRO_OPC_SLTI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "slti.sn", TILEPRO_OPC_SLTI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "slti_u", TILEPRO_OPC_SLTI_U, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } },
- },
- { "slti_u.sn", TILEPRO_OPC_SLTI_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltib", TILEPRO_OPC_SLTIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltib.sn", TILEPRO_OPC_SLTIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltib_u", TILEPRO_OPC_SLTIB_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltib_u.sn", TILEPRO_OPC_SLTIB_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltih", TILEPRO_OPC_SLTIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltih.sn", TILEPRO_OPC_SLTIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltih_u", TILEPRO_OPC_SLTIH_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sltih_u.sn", TILEPRO_OPC_SLTIH_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "sne", TILEPRO_OPC_SNE, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "sne.sn", TILEPRO_OPC_SNE_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sneb", TILEPRO_OPC_SNEB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sneb.sn", TILEPRO_OPC_SNEB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sneh", TILEPRO_OPC_SNEH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sneh.sn", TILEPRO_OPC_SNEH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sra", TILEPRO_OPC_SRA, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "sra.sn", TILEPRO_OPC_SRA_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "srab", TILEPRO_OPC_SRAB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "srab.sn", TILEPRO_OPC_SRAB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "srah", TILEPRO_OPC_SRAH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "srah.sn", TILEPRO_OPC_SRAH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "srai", TILEPRO_OPC_SRAI, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } },
- },
- { "srai.sn", TILEPRO_OPC_SRAI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "sraib", TILEPRO_OPC_SRAIB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "sraib.sn", TILEPRO_OPC_SRAIB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "sraih", TILEPRO_OPC_SRAIH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "sraih.sn", TILEPRO_OPC_SRAIH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } },
- },
- { "sub", TILEPRO_OPC_SUB, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "sub.sn", TILEPRO_OPC_SUB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subb", TILEPRO_OPC_SUBB, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subb.sn", TILEPRO_OPC_SUBB_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subbs_u", TILEPRO_OPC_SUBBS_U, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subbs_u.sn", TILEPRO_OPC_SUBBS_U_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subh", TILEPRO_OPC_SUBH, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subh.sn", TILEPRO_OPC_SUBH_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subhs", TILEPRO_OPC_SUBHS, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subhs.sn", TILEPRO_OPC_SUBHS_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subs", TILEPRO_OPC_SUBS, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "subs.sn", TILEPRO_OPC_SUBS_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "sw", TILEPRO_OPC_SW, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } },
- },
- { "swadd", TILEPRO_OPC_SWADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } },
- },
- { "swint0", TILEPRO_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint1", TILEPRO_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint2", TILEPRO_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint3", TILEPRO_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "tblidxb0", TILEPRO_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1,
- { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } },
- },
- { "tblidxb0.sn", TILEPRO_OPC_TBLIDXB0_SN, 0x1, 2, TREG_SN, 1,
- { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "tblidxb1", TILEPRO_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1,
- { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } },
- },
- { "tblidxb1.sn", TILEPRO_OPC_TBLIDXB1_SN, 0x1, 2, TREG_SN, 1,
- { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "tblidxb2", TILEPRO_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1,
- { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } },
- },
- { "tblidxb2.sn", TILEPRO_OPC_TBLIDXB2_SN, 0x1, 2, TREG_SN, 1,
- { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "tblidxb3", TILEPRO_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1,
- { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } },
- },
- { "tblidxb3.sn", TILEPRO_OPC_TBLIDXB3_SN, 0x1, 2, TREG_SN, 1,
- { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "tns", TILEPRO_OPC_TNS, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "tns.sn", TILEPRO_OPC_TNS_SN, 0x2, 2, TREG_SN, 1,
- { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "wh64", TILEPRO_OPC_WH64, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } },
- },
- { "xor", TILEPRO_OPC_XOR, 0xf, 3, TREG_ZERO, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } },
- },
- { "xor.sn", TILEPRO_OPC_XOR_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "xori", TILEPRO_OPC_XORI, 0x3, 3, TREG_ZERO, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "xori.sn", TILEPRO_OPC_XORI_SN, 0x3, 3, TREG_SN, 1,
- { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { NULL, TILEPRO_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } },
- }
-};
-#define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))
-#define CHILD(array_index) (TILEPRO_OPC_NONE + (array_index))
-
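Each row of the opcode table above packs a mnemonic string, its enum value, a five-pipe compatibility bitmask, the operand count, any implicitly written register (TREG_ZERO or TREG_SN), a can-bundle flag, and per-pipe lists of operand-table indices. The two macros just defined encode the decode FSMs that follow: the low six bits of a BITFIELD() entry give the field's starting bit within the bundle, the remaining bits hold its width mask, and CHILD() biases a subtable index past the opcode enum so that leaf opcodes and child links can share one range of unsigned shorts. A minimal sketch of the resulting table walk — the helper name and signature here are illustrative assumptions, not quoted from the deleted file:

	static const struct tilepro_opcode *
	decode_one_pipe(tilepro_bundle_bits bits, const unsigned short *table)
	{
		int index = 0;

		for (;;) {
			/* Low 6 bits: field start; remaining bits: field mask. */
			unsigned short bitspec = table[index];
			unsigned int field =
				((unsigned int)(bits >> (bitspec & 63))) &
				(bitspec >> 6);

			/* Each entry is either a leaf opcode or a CHILD() link. */
			unsigned short next = table[index + 1 + field];
			if (next <= TILEPRO_OPC_NONE)
				return &tilepro_opcodes[next];

			/* Undo the CHILD() bias and descend into the subtable. */
			index = next - TILEPRO_OPC_NONE;
		}
	}

Keeping leaves and links in one flat array makes each decode level cost a shift, a mask, and a single load, which is why the FSMs below are flat unsigned-short arrays rather than pointer trees.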
-static const unsigned short decode_X0_fsm[1153] =
-{
- BITFIELD(22, 9) /* index 0 */,
- CHILD(513), CHILD(530), CHILD(547), CHILD(564), CHILD(596), CHILD(613),
- CHILD(630), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(663), CHILD(680), CHILD(697),
- CHILD(714), CHILD(746), CHILD(763), CHILD(780), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813),
- CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828),
- CHILD(828), CHILD(828), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(873), CHILD(878), CHILD(883), CHILD(903), CHILD(908),
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(913),
- CHILD(918), CHILD(923), CHILD(943), CHILD(948), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(953), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(988), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, CHILD(993), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1076), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(18, 4) /* index 513 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB, TILEPRO_OPC_ADDH, TILEPRO_OPC_ADD,
- TILEPRO_OPC_ADIFFB_U, TILEPRO_OPC_ADIFFH, TILEPRO_OPC_AND,
- TILEPRO_OPC_AVGB_U, TILEPRO_OPC_AVGH, TILEPRO_OPC_CRC32_32,
- TILEPRO_OPC_CRC32_8, TILEPRO_OPC_INTHB, TILEPRO_OPC_INTHH,
- TILEPRO_OPC_INTLB, TILEPRO_OPC_INTLH, TILEPRO_OPC_MAXB_U,
- BITFIELD(18, 4) /* index 530 */,
- TILEPRO_OPC_MAXH, TILEPRO_OPC_MINB_U, TILEPRO_OPC_MINH, TILEPRO_OPC_MNZB,
- TILEPRO_OPC_MNZH, TILEPRO_OPC_MNZ, TILEPRO_OPC_MULHHA_SS,
- TILEPRO_OPC_MULHHA_SU, TILEPRO_OPC_MULHHA_UU, TILEPRO_OPC_MULHHSA_UU,
- TILEPRO_OPC_MULHH_SS, TILEPRO_OPC_MULHH_SU, TILEPRO_OPC_MULHH_UU,
- TILEPRO_OPC_MULHLA_SS, TILEPRO_OPC_MULHLA_SU, TILEPRO_OPC_MULHLA_US,
- BITFIELD(18, 4) /* index 547 */,
- TILEPRO_OPC_MULHLA_UU, TILEPRO_OPC_MULHLSA_UU, TILEPRO_OPC_MULHL_SS,
- TILEPRO_OPC_MULHL_SU, TILEPRO_OPC_MULHL_US, TILEPRO_OPC_MULHL_UU,
- TILEPRO_OPC_MULLLA_SS, TILEPRO_OPC_MULLLA_SU, TILEPRO_OPC_MULLLA_UU,
- TILEPRO_OPC_MULLLSA_UU, TILEPRO_OPC_MULLL_SS, TILEPRO_OPC_MULLL_SU,
- TILEPRO_OPC_MULLL_UU, TILEPRO_OPC_MVNZ, TILEPRO_OPC_MVZ, TILEPRO_OPC_MZB,
- BITFIELD(18, 4) /* index 564 */,
- TILEPRO_OPC_MZH, TILEPRO_OPC_MZ, TILEPRO_OPC_NOR, CHILD(581),
- TILEPRO_OPC_PACKHB, TILEPRO_OPC_PACKLB, TILEPRO_OPC_RL, TILEPRO_OPC_S1A,
- TILEPRO_OPC_S2A, TILEPRO_OPC_S3A, TILEPRO_OPC_SADAB_U, TILEPRO_OPC_SADAH,
- TILEPRO_OPC_SADAH_U, TILEPRO_OPC_SADB_U, TILEPRO_OPC_SADH,
- TILEPRO_OPC_SADH_U,
- BITFIELD(12, 2) /* index 581 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(586),
- BITFIELD(14, 2) /* index 586 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(591),
- BITFIELD(16, 2) /* index 591 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE,
- BITFIELD(18, 4) /* index 596 */,
- TILEPRO_OPC_SEQB, TILEPRO_OPC_SEQH, TILEPRO_OPC_SEQ, TILEPRO_OPC_SHLB,
- TILEPRO_OPC_SHLH, TILEPRO_OPC_SHL, TILEPRO_OPC_SHRB, TILEPRO_OPC_SHRH,
- TILEPRO_OPC_SHR, TILEPRO_OPC_SLTB, TILEPRO_OPC_SLTB_U, TILEPRO_OPC_SLTEB,
- TILEPRO_OPC_SLTEB_U, TILEPRO_OPC_SLTEH, TILEPRO_OPC_SLTEH_U,
- TILEPRO_OPC_SLTE,
- BITFIELD(18, 4) /* index 613 */,
- TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLTH, TILEPRO_OPC_SLTH_U, TILEPRO_OPC_SLT,
- TILEPRO_OPC_SLT_U, TILEPRO_OPC_SNEB, TILEPRO_OPC_SNEH, TILEPRO_OPC_SNE,
- TILEPRO_OPC_SRAB, TILEPRO_OPC_SRAH, TILEPRO_OPC_SRA, TILEPRO_OPC_SUBB,
- TILEPRO_OPC_SUBH, TILEPRO_OPC_SUB, TILEPRO_OPC_XOR, TILEPRO_OPC_DWORD_ALIGN,
- BITFIELD(18, 3) /* index 630 */,
- CHILD(639), CHILD(642), CHILD(645), CHILD(648), CHILD(651), CHILD(654),
- CHILD(657), CHILD(660),
- BITFIELD(21, 1) /* index 639 */,
- TILEPRO_OPC_ADDS, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 642 */,
- TILEPRO_OPC_SUBS, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 645 */,
- TILEPRO_OPC_ADDBS_U, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 648 */,
- TILEPRO_OPC_ADDHS, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 651 */,
- TILEPRO_OPC_SUBBS_U, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 654 */,
- TILEPRO_OPC_SUBHS, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 657 */,
- TILEPRO_OPC_PACKHS, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 660 */,
- TILEPRO_OPC_PACKBS_U, TILEPRO_OPC_NONE,
- BITFIELD(18, 4) /* index 663 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB_SN, TILEPRO_OPC_ADDH_SN,
- TILEPRO_OPC_ADD_SN, TILEPRO_OPC_ADIFFB_U_SN, TILEPRO_OPC_ADIFFH_SN,
- TILEPRO_OPC_AND_SN, TILEPRO_OPC_AVGB_U_SN, TILEPRO_OPC_AVGH_SN,
- TILEPRO_OPC_CRC32_32_SN, TILEPRO_OPC_CRC32_8_SN, TILEPRO_OPC_INTHB_SN,
- TILEPRO_OPC_INTHH_SN, TILEPRO_OPC_INTLB_SN, TILEPRO_OPC_INTLH_SN,
- TILEPRO_OPC_MAXB_U_SN,
- BITFIELD(18, 4) /* index 680 */,
- TILEPRO_OPC_MAXH_SN, TILEPRO_OPC_MINB_U_SN, TILEPRO_OPC_MINH_SN,
- TILEPRO_OPC_MNZB_SN, TILEPRO_OPC_MNZH_SN, TILEPRO_OPC_MNZ_SN,
- TILEPRO_OPC_MULHHA_SS_SN, TILEPRO_OPC_MULHHA_SU_SN,
- TILEPRO_OPC_MULHHA_UU_SN, TILEPRO_OPC_MULHHSA_UU_SN,
- TILEPRO_OPC_MULHH_SS_SN, TILEPRO_OPC_MULHH_SU_SN, TILEPRO_OPC_MULHH_UU_SN,
- TILEPRO_OPC_MULHLA_SS_SN, TILEPRO_OPC_MULHLA_SU_SN,
- TILEPRO_OPC_MULHLA_US_SN,
- BITFIELD(18, 4) /* index 697 */,
- TILEPRO_OPC_MULHLA_UU_SN, TILEPRO_OPC_MULHLSA_UU_SN,
- TILEPRO_OPC_MULHL_SS_SN, TILEPRO_OPC_MULHL_SU_SN, TILEPRO_OPC_MULHL_US_SN,
- TILEPRO_OPC_MULHL_UU_SN, TILEPRO_OPC_MULLLA_SS_SN, TILEPRO_OPC_MULLLA_SU_SN,
- TILEPRO_OPC_MULLLA_UU_SN, TILEPRO_OPC_MULLLSA_UU_SN,
- TILEPRO_OPC_MULLL_SS_SN, TILEPRO_OPC_MULLL_SU_SN, TILEPRO_OPC_MULLL_UU_SN,
- TILEPRO_OPC_MVNZ_SN, TILEPRO_OPC_MVZ_SN, TILEPRO_OPC_MZB_SN,
- BITFIELD(18, 4) /* index 714 */,
- TILEPRO_OPC_MZH_SN, TILEPRO_OPC_MZ_SN, TILEPRO_OPC_NOR_SN, CHILD(731),
- TILEPRO_OPC_PACKHB_SN, TILEPRO_OPC_PACKLB_SN, TILEPRO_OPC_RL_SN,
- TILEPRO_OPC_S1A_SN, TILEPRO_OPC_S2A_SN, TILEPRO_OPC_S3A_SN,
- TILEPRO_OPC_SADAB_U_SN, TILEPRO_OPC_SADAH_SN, TILEPRO_OPC_SADAH_U_SN,
- TILEPRO_OPC_SADB_U_SN, TILEPRO_OPC_SADH_SN, TILEPRO_OPC_SADH_U_SN,
- BITFIELD(12, 2) /* index 731 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(736),
- BITFIELD(14, 2) /* index 736 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(741),
- BITFIELD(16, 2) /* index 741 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN,
- TILEPRO_OPC_MOVE_SN,
- BITFIELD(18, 4) /* index 746 */,
- TILEPRO_OPC_SEQB_SN, TILEPRO_OPC_SEQH_SN, TILEPRO_OPC_SEQ_SN,
- TILEPRO_OPC_SHLB_SN, TILEPRO_OPC_SHLH_SN, TILEPRO_OPC_SHL_SN,
- TILEPRO_OPC_SHRB_SN, TILEPRO_OPC_SHRH_SN, TILEPRO_OPC_SHR_SN,
- TILEPRO_OPC_SLTB_SN, TILEPRO_OPC_SLTB_U_SN, TILEPRO_OPC_SLTEB_SN,
- TILEPRO_OPC_SLTEB_U_SN, TILEPRO_OPC_SLTEH_SN, TILEPRO_OPC_SLTEH_U_SN,
- TILEPRO_OPC_SLTE_SN,
- BITFIELD(18, 4) /* index 763 */,
- TILEPRO_OPC_SLTE_U_SN, TILEPRO_OPC_SLTH_SN, TILEPRO_OPC_SLTH_U_SN,
- TILEPRO_OPC_SLT_SN, TILEPRO_OPC_SLT_U_SN, TILEPRO_OPC_SNEB_SN,
- TILEPRO_OPC_SNEH_SN, TILEPRO_OPC_SNE_SN, TILEPRO_OPC_SRAB_SN,
- TILEPRO_OPC_SRAH_SN, TILEPRO_OPC_SRA_SN, TILEPRO_OPC_SUBB_SN,
- TILEPRO_OPC_SUBH_SN, TILEPRO_OPC_SUB_SN, TILEPRO_OPC_XOR_SN,
- TILEPRO_OPC_DWORD_ALIGN_SN,
- BITFIELD(18, 3) /* index 780 */,
- CHILD(789), CHILD(792), CHILD(795), CHILD(798), CHILD(801), CHILD(804),
- CHILD(807), CHILD(810),
- BITFIELD(21, 1) /* index 789 */,
- TILEPRO_OPC_ADDS_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 792 */,
- TILEPRO_OPC_SUBS_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 795 */,
- TILEPRO_OPC_ADDBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 798 */,
- TILEPRO_OPC_ADDHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 801 */,
- TILEPRO_OPC_SUBBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 804 */,
- TILEPRO_OPC_SUBHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 807 */,
- TILEPRO_OPC_PACKHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(21, 1) /* index 810 */,
- TILEPRO_OPC_PACKBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(6, 2) /* index 813 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- CHILD(818),
- BITFIELD(8, 2) /* index 818 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- CHILD(823),
- BITFIELD(10, 2) /* index 823 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- TILEPRO_OPC_MOVELI_SN,
- BITFIELD(6, 2) /* index 828 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(833),
- BITFIELD(8, 2) /* index 833 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(838),
- BITFIELD(10, 2) /* index 838 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_MOVELI,
- BITFIELD(0, 2) /* index 843 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(848),
- BITFIELD(2, 2) /* index 848 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(853),
- BITFIELD(4, 2) /* index 853 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(858),
- BITFIELD(6, 2) /* index 858 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(863),
- BITFIELD(8, 2) /* index 863 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(868),
- BITFIELD(10, 2) /* index 868 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_INFOL,
- BITFIELD(20, 2) /* index 873 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB, TILEPRO_OPC_ADDIH, TILEPRO_OPC_ADDI,
- BITFIELD(20, 2) /* index 878 */,
- TILEPRO_OPC_MAXIB_U, TILEPRO_OPC_MAXIH, TILEPRO_OPC_MINIB_U,
- TILEPRO_OPC_MINIH,
- BITFIELD(20, 2) /* index 883 */,
- CHILD(888), TILEPRO_OPC_SEQIB, TILEPRO_OPC_SEQIH, TILEPRO_OPC_SEQI,
- BITFIELD(6, 2) /* index 888 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(893),
- BITFIELD(8, 2) /* index 893 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(898),
- BITFIELD(10, 2) /* index 898 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI,
- BITFIELD(20, 2) /* index 903 */,
- TILEPRO_OPC_SLTIB, TILEPRO_OPC_SLTIB_U, TILEPRO_OPC_SLTIH,
- TILEPRO_OPC_SLTIH_U,
- BITFIELD(20, 2) /* index 908 */,
- TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(20, 2) /* index 913 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB_SN, TILEPRO_OPC_ADDIH_SN,
- TILEPRO_OPC_ADDI_SN,
- BITFIELD(20, 2) /* index 918 */,
- TILEPRO_OPC_MAXIB_U_SN, TILEPRO_OPC_MAXIH_SN, TILEPRO_OPC_MINIB_U_SN,
- TILEPRO_OPC_MINIH_SN,
- BITFIELD(20, 2) /* index 923 */,
- CHILD(928), TILEPRO_OPC_SEQIB_SN, TILEPRO_OPC_SEQIH_SN, TILEPRO_OPC_SEQI_SN,
- BITFIELD(6, 2) /* index 928 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(933),
- BITFIELD(8, 2) /* index 933 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(938),
- BITFIELD(10, 2) /* index 938 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN,
- TILEPRO_OPC_MOVEI_SN,
- BITFIELD(20, 2) /* index 943 */,
- TILEPRO_OPC_SLTIB_SN, TILEPRO_OPC_SLTIB_U_SN, TILEPRO_OPC_SLTIH_SN,
- TILEPRO_OPC_SLTIH_U_SN,
- BITFIELD(20, 2) /* index 948 */,
- TILEPRO_OPC_SLTI_SN, TILEPRO_OPC_SLTI_U_SN, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE,
- BITFIELD(20, 2) /* index 953 */,
- TILEPRO_OPC_NONE, CHILD(958), TILEPRO_OPC_XORI, TILEPRO_OPC_NONE,
- BITFIELD(0, 2) /* index 958 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(963),
- BITFIELD(2, 2) /* index 963 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(968),
- BITFIELD(4, 2) /* index 968 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(973),
- BITFIELD(6, 2) /* index 973 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(978),
- BITFIELD(8, 2) /* index 978 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(983),
- BITFIELD(10, 2) /* index 983 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO,
- BITFIELD(20, 2) /* index 988 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ANDI_SN, TILEPRO_OPC_XORI_SN,
- TILEPRO_OPC_NONE,
- BITFIELD(17, 5) /* index 993 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_RLI, TILEPRO_OPC_SHLIB, TILEPRO_OPC_SHLIH,
- TILEPRO_OPC_SHLI, TILEPRO_OPC_SHRIB, TILEPRO_OPC_SHRIH, TILEPRO_OPC_SHRI,
- TILEPRO_OPC_SRAIB, TILEPRO_OPC_SRAIH, TILEPRO_OPC_SRAI, CHILD(1026),
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(12, 4) /* index 1026 */,
- TILEPRO_OPC_NONE, CHILD(1043), CHILD(1046), CHILD(1049), CHILD(1052),
- CHILD(1055), CHILD(1058), CHILD(1061), CHILD(1064), CHILD(1067),
- CHILD(1070), CHILD(1073), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1043 */,
- TILEPRO_OPC_BITX, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1046 */,
- TILEPRO_OPC_BYTEX, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1049 */,
- TILEPRO_OPC_CLZ, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1052 */,
- TILEPRO_OPC_CTZ, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1055 */,
- TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1058 */,
- TILEPRO_OPC_NOP, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1061 */,
- TILEPRO_OPC_PCNT, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1064 */,
- TILEPRO_OPC_TBLIDXB0, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1067 */,
- TILEPRO_OPC_TBLIDXB1, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1070 */,
- TILEPRO_OPC_TBLIDXB2, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1073 */,
- TILEPRO_OPC_TBLIDXB3, TILEPRO_OPC_NONE,
- BITFIELD(17, 5) /* index 1076 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_RLI_SN, TILEPRO_OPC_SHLIB_SN,
- TILEPRO_OPC_SHLIH_SN, TILEPRO_OPC_SHLI_SN, TILEPRO_OPC_SHRIB_SN,
- TILEPRO_OPC_SHRIH_SN, TILEPRO_OPC_SHRI_SN, TILEPRO_OPC_SRAIB_SN,
- TILEPRO_OPC_SRAIH_SN, TILEPRO_OPC_SRAI_SN, CHILD(1109), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(12, 4) /* index 1109 */,
- TILEPRO_OPC_NONE, CHILD(1126), CHILD(1129), CHILD(1132), CHILD(1135),
- CHILD(1055), CHILD(1058), CHILD(1138), CHILD(1141), CHILD(1144),
- CHILD(1147), CHILD(1150), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1126 */,
- TILEPRO_OPC_BITX_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1129 */,
- TILEPRO_OPC_BYTEX_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1132 */,
- TILEPRO_OPC_CLZ_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1135 */,
- TILEPRO_OPC_CTZ_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1138 */,
- TILEPRO_OPC_PCNT_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1141 */,
- TILEPRO_OPC_TBLIDXB0_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1144 */,
- TILEPRO_OPC_TBLIDXB1_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1147 */,
- TILEPRO_OPC_TBLIDXB2_SN, TILEPRO_OPC_NONE,
- BITFIELD(16, 1) /* index 1150 */,
- TILEPRO_OPC_TBLIDXB3_SN, TILEPRO_OPC_NONE,
-};
-
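One detail worth noting in the X0 FSM above: the cascaded two-bit checks at indexes 581, 586, and 591 (and their .sn twins at 731-741) exist purely to recognize pseudo-instructions. Only when every slice of the second source-register field reads 0x3 — i.e. the register is 63, the zero register — does the walk land on TILEPRO_OPC_MOVE instead of TILEPRO_OPC_OR; the same trick maps addli to moveli, ori to movei, auli to infol, and andi to info. A hypothetical caller, reusing the decode_one_pipe() sketch above (the function name and printk formatting are illustrative only):

	/* Decode the X0 slot of a bundle; the FSM bit offsets are absolute
	 * bundle positions, so the raw bundle bits can be passed directly. */
	static void print_x0_mnemonic(tilepro_bundle_bits bundle)
	{
		const struct tilepro_opcode *op =
			decode_one_pipe(bundle, decode_X0_fsm);

		if (op->mnemonic != TILEPRO_OPC_NONE)
			printk(KERN_DEBUG "%s\n", op->name);
	}

Fed a bundle whose X0 slot holds "or r5, r7, zero", such a walk would descend through indexes 581, 586, and 591 and print "move" rather than "or".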
-static const unsigned short decode_X1_fsm[1540] =
-{
- BITFIELD(54, 9) /* index 0 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- CHILD(513), CHILD(561), CHILD(594), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(641),
- CHILD(689), CHILD(722), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(766),
- CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766),
- CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766),
- CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766),
- CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766),
- CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766),
- CHILD(766), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781),
- CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781),
- CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781),
- CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781),
- CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781),
- CHILD(781), CHILD(781), CHILD(781), CHILD(796), CHILD(796), CHILD(796),
- CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796),
- CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796),
- CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796),
- CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796),
- CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(826),
- CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826),
- CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826),
- CHILD(826), CHILD(826), CHILD(826), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843),
- CHILD(843), CHILD(860), CHILD(899), CHILD(923), CHILD(932),
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- CHILD(941), CHILD(950), CHILD(974), CHILD(983), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM,
- TILEPRO_OPC_MM, TILEPRO_OPC_MM, TILEPRO_OPC_MM, CHILD(992),
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, CHILD(1334),
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J,
- TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_J, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL,
- TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_JAL, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(49, 5) /* index 513 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB, TILEPRO_OPC_ADDH, TILEPRO_OPC_ADD,
- TILEPRO_OPC_AND, TILEPRO_OPC_INTHB, TILEPRO_OPC_INTHH, TILEPRO_OPC_INTLB,
- TILEPRO_OPC_INTLH, TILEPRO_OPC_JALRP, TILEPRO_OPC_JALR, TILEPRO_OPC_JRP,
- TILEPRO_OPC_JR, TILEPRO_OPC_LNK, TILEPRO_OPC_MAXB_U, TILEPRO_OPC_MAXH,
- TILEPRO_OPC_MINB_U, TILEPRO_OPC_MINH, TILEPRO_OPC_MNZB, TILEPRO_OPC_MNZH,
- TILEPRO_OPC_MNZ, TILEPRO_OPC_MZB, TILEPRO_OPC_MZH, TILEPRO_OPC_MZ,
- TILEPRO_OPC_NOR, CHILD(546), TILEPRO_OPC_PACKHB, TILEPRO_OPC_PACKLB,
- TILEPRO_OPC_RL, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_S3A,
- BITFIELD(43, 2) /* index 546 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(551),
- BITFIELD(45, 2) /* index 551 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(556),
- BITFIELD(47, 2) /* index 556 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE,
- BITFIELD(49, 5) /* index 561 */,
- TILEPRO_OPC_SB, TILEPRO_OPC_SEQB, TILEPRO_OPC_SEQH, TILEPRO_OPC_SEQ,
- TILEPRO_OPC_SHLB, TILEPRO_OPC_SHLH, TILEPRO_OPC_SHL, TILEPRO_OPC_SHRB,
- TILEPRO_OPC_SHRH, TILEPRO_OPC_SHR, TILEPRO_OPC_SH, TILEPRO_OPC_SLTB,
- TILEPRO_OPC_SLTB_U, TILEPRO_OPC_SLTEB, TILEPRO_OPC_SLTEB_U,
- TILEPRO_OPC_SLTEH, TILEPRO_OPC_SLTEH_U, TILEPRO_OPC_SLTE,
- TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLTH, TILEPRO_OPC_SLTH_U, TILEPRO_OPC_SLT,
- TILEPRO_OPC_SLT_U, TILEPRO_OPC_SNEB, TILEPRO_OPC_SNEH, TILEPRO_OPC_SNE,
- TILEPRO_OPC_SRAB, TILEPRO_OPC_SRAH, TILEPRO_OPC_SRA, TILEPRO_OPC_SUBB,
- TILEPRO_OPC_SUBH, TILEPRO_OPC_SUB,
- BITFIELD(49, 4) /* index 594 */,
- CHILD(611), CHILD(614), CHILD(617), CHILD(620), CHILD(623), CHILD(626),
- CHILD(629), CHILD(632), CHILD(635), CHILD(638), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 611 */,
- TILEPRO_OPC_SW, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 614 */,
- TILEPRO_OPC_XOR, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 617 */,
- TILEPRO_OPC_ADDS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 620 */,
- TILEPRO_OPC_SUBS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 623 */,
- TILEPRO_OPC_ADDBS_U, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 626 */,
- TILEPRO_OPC_ADDHS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 629 */,
- TILEPRO_OPC_SUBBS_U, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 632 */,
- TILEPRO_OPC_SUBHS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 635 */,
- TILEPRO_OPC_PACKHS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 638 */,
- TILEPRO_OPC_PACKBS_U, TILEPRO_OPC_NONE,
- BITFIELD(49, 5) /* index 641 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDB_SN, TILEPRO_OPC_ADDH_SN,
- TILEPRO_OPC_ADD_SN, TILEPRO_OPC_AND_SN, TILEPRO_OPC_INTHB_SN,
- TILEPRO_OPC_INTHH_SN, TILEPRO_OPC_INTLB_SN, TILEPRO_OPC_INTLH_SN,
- TILEPRO_OPC_JALRP, TILEPRO_OPC_JALR, TILEPRO_OPC_JRP, TILEPRO_OPC_JR,
- TILEPRO_OPC_LNK_SN, TILEPRO_OPC_MAXB_U_SN, TILEPRO_OPC_MAXH_SN,
- TILEPRO_OPC_MINB_U_SN, TILEPRO_OPC_MINH_SN, TILEPRO_OPC_MNZB_SN,
- TILEPRO_OPC_MNZH_SN, TILEPRO_OPC_MNZ_SN, TILEPRO_OPC_MZB_SN,
- TILEPRO_OPC_MZH_SN, TILEPRO_OPC_MZ_SN, TILEPRO_OPC_NOR_SN, CHILD(674),
- TILEPRO_OPC_PACKHB_SN, TILEPRO_OPC_PACKLB_SN, TILEPRO_OPC_RL_SN,
- TILEPRO_OPC_S1A_SN, TILEPRO_OPC_S2A_SN, TILEPRO_OPC_S3A_SN,
- BITFIELD(43, 2) /* index 674 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(679),
- BITFIELD(45, 2) /* index 679 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, CHILD(684),
- BITFIELD(47, 2) /* index 684 */,
- TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN, TILEPRO_OPC_OR_SN,
- TILEPRO_OPC_MOVE_SN,
- BITFIELD(49, 5) /* index 689 */,
- TILEPRO_OPC_SB, TILEPRO_OPC_SEQB_SN, TILEPRO_OPC_SEQH_SN,
- TILEPRO_OPC_SEQ_SN, TILEPRO_OPC_SHLB_SN, TILEPRO_OPC_SHLH_SN,
- TILEPRO_OPC_SHL_SN, TILEPRO_OPC_SHRB_SN, TILEPRO_OPC_SHRH_SN,
- TILEPRO_OPC_SHR_SN, TILEPRO_OPC_SH, TILEPRO_OPC_SLTB_SN,
- TILEPRO_OPC_SLTB_U_SN, TILEPRO_OPC_SLTEB_SN, TILEPRO_OPC_SLTEB_U_SN,
- TILEPRO_OPC_SLTEH_SN, TILEPRO_OPC_SLTEH_U_SN, TILEPRO_OPC_SLTE_SN,
- TILEPRO_OPC_SLTE_U_SN, TILEPRO_OPC_SLTH_SN, TILEPRO_OPC_SLTH_U_SN,
- TILEPRO_OPC_SLT_SN, TILEPRO_OPC_SLT_U_SN, TILEPRO_OPC_SNEB_SN,
- TILEPRO_OPC_SNEH_SN, TILEPRO_OPC_SNE_SN, TILEPRO_OPC_SRAB_SN,
- TILEPRO_OPC_SRAH_SN, TILEPRO_OPC_SRA_SN, TILEPRO_OPC_SUBB_SN,
- TILEPRO_OPC_SUBH_SN, TILEPRO_OPC_SUB_SN,
- BITFIELD(49, 4) /* index 722 */,
- CHILD(611), CHILD(739), CHILD(742), CHILD(745), CHILD(748), CHILD(751),
- CHILD(754), CHILD(757), CHILD(760), CHILD(763), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 739 */,
- TILEPRO_OPC_XOR_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 742 */,
- TILEPRO_OPC_ADDS_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 745 */,
- TILEPRO_OPC_SUBS_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 748 */,
- TILEPRO_OPC_ADDBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 751 */,
- TILEPRO_OPC_ADDHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 754 */,
- TILEPRO_OPC_SUBBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 757 */,
- TILEPRO_OPC_SUBHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 760 */,
- TILEPRO_OPC_PACKHS_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 763 */,
- TILEPRO_OPC_PACKBS_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(37, 2) /* index 766 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- CHILD(771),
- BITFIELD(39, 2) /* index 771 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- CHILD(776),
- BITFIELD(41, 2) /* index 776 */,
- TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN, TILEPRO_OPC_ADDLI_SN,
- TILEPRO_OPC_MOVELI_SN,
- BITFIELD(37, 2) /* index 781 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(786),
- BITFIELD(39, 2) /* index 786 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, CHILD(791),
- BITFIELD(41, 2) /* index 791 */,
- TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_ADDLI, TILEPRO_OPC_MOVELI,
- BITFIELD(31, 2) /* index 796 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(801),
- BITFIELD(33, 2) /* index 801 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(806),
- BITFIELD(35, 2) /* index 806 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(811),
- BITFIELD(37, 2) /* index 811 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(816),
- BITFIELD(39, 2) /* index 816 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, CHILD(821),
- BITFIELD(41, 2) /* index 821 */,
- TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_AULI, TILEPRO_OPC_INFOL,
- BITFIELD(31, 4) /* index 826 */,
- TILEPRO_OPC_BZ, TILEPRO_OPC_BZT, TILEPRO_OPC_BNZ, TILEPRO_OPC_BNZT,
- TILEPRO_OPC_BGZ, TILEPRO_OPC_BGZT, TILEPRO_OPC_BGEZ, TILEPRO_OPC_BGEZT,
- TILEPRO_OPC_BLZ, TILEPRO_OPC_BLZT, TILEPRO_OPC_BLEZ, TILEPRO_OPC_BLEZT,
- TILEPRO_OPC_BBS, TILEPRO_OPC_BBST, TILEPRO_OPC_BBNS, TILEPRO_OPC_BBNST,
- BITFIELD(31, 4) /* index 843 */,
- TILEPRO_OPC_BZ_SN, TILEPRO_OPC_BZT_SN, TILEPRO_OPC_BNZ_SN,
- TILEPRO_OPC_BNZT_SN, TILEPRO_OPC_BGZ_SN, TILEPRO_OPC_BGZT_SN,
- TILEPRO_OPC_BGEZ_SN, TILEPRO_OPC_BGEZT_SN, TILEPRO_OPC_BLZ_SN,
- TILEPRO_OPC_BLZT_SN, TILEPRO_OPC_BLEZ_SN, TILEPRO_OPC_BLEZT_SN,
- TILEPRO_OPC_BBS_SN, TILEPRO_OPC_BBST_SN, TILEPRO_OPC_BBNS_SN,
- TILEPRO_OPC_BBNST_SN,
- BITFIELD(51, 3) /* index 860 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB, TILEPRO_OPC_ADDIH, TILEPRO_OPC_ADDI,
- CHILD(869), TILEPRO_OPC_MAXIB_U, TILEPRO_OPC_MAXIH, TILEPRO_OPC_MFSPR,
- BITFIELD(31, 2) /* index 869 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(874),
- BITFIELD(33, 2) /* index 874 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(879),
- BITFIELD(35, 2) /* index 879 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(884),
- BITFIELD(37, 2) /* index 884 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(889),
- BITFIELD(39, 2) /* index 889 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(894),
- BITFIELD(41, 2) /* index 894 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO,
- BITFIELD(51, 3) /* index 899 */,
- TILEPRO_OPC_MINIB_U, TILEPRO_OPC_MINIH, TILEPRO_OPC_MTSPR, CHILD(908),
- TILEPRO_OPC_SEQIB, TILEPRO_OPC_SEQIH, TILEPRO_OPC_SEQI, TILEPRO_OPC_SLTIB,
- BITFIELD(37, 2) /* index 908 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(913),
- BITFIELD(39, 2) /* index 913 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(918),
- BITFIELD(41, 2) /* index 918 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI,
- BITFIELD(51, 3) /* index 923 */,
- TILEPRO_OPC_SLTIB_U, TILEPRO_OPC_SLTIH, TILEPRO_OPC_SLTIH_U,
- TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_XORI, TILEPRO_OPC_LBADD,
- TILEPRO_OPC_LBADD_U,
- BITFIELD(51, 3) /* index 932 */,
- TILEPRO_OPC_LHADD, TILEPRO_OPC_LHADD_U, TILEPRO_OPC_LWADD,
- TILEPRO_OPC_LWADD_NA, TILEPRO_OPC_SBADD, TILEPRO_OPC_SHADD,
- TILEPRO_OPC_SWADD, TILEPRO_OPC_NONE,
- BITFIELD(51, 3) /* index 941 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_ADDIB_SN, TILEPRO_OPC_ADDIH_SN,
- TILEPRO_OPC_ADDI_SN, TILEPRO_OPC_ANDI_SN, TILEPRO_OPC_MAXIB_U_SN,
- TILEPRO_OPC_MAXIH_SN, TILEPRO_OPC_MFSPR,
- BITFIELD(51, 3) /* index 950 */,
- TILEPRO_OPC_MINIB_U_SN, TILEPRO_OPC_MINIH_SN, TILEPRO_OPC_MTSPR, CHILD(959),
- TILEPRO_OPC_SEQIB_SN, TILEPRO_OPC_SEQIH_SN, TILEPRO_OPC_SEQI_SN,
- TILEPRO_OPC_SLTIB_SN,
- BITFIELD(37, 2) /* index 959 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(964),
- BITFIELD(39, 2) /* index 964 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, CHILD(969),
- BITFIELD(41, 2) /* index 969 */,
- TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN, TILEPRO_OPC_ORI_SN,
- TILEPRO_OPC_MOVEI_SN,
- BITFIELD(51, 3) /* index 974 */,
- TILEPRO_OPC_SLTIB_U_SN, TILEPRO_OPC_SLTIH_SN, TILEPRO_OPC_SLTIH_U_SN,
- TILEPRO_OPC_SLTI_SN, TILEPRO_OPC_SLTI_U_SN, TILEPRO_OPC_XORI_SN,
- TILEPRO_OPC_LBADD_SN, TILEPRO_OPC_LBADD_U_SN,
- BITFIELD(51, 3) /* index 983 */,
- TILEPRO_OPC_LHADD_SN, TILEPRO_OPC_LHADD_U_SN, TILEPRO_OPC_LWADD_SN,
- TILEPRO_OPC_LWADD_NA_SN, TILEPRO_OPC_SBADD, TILEPRO_OPC_SHADD,
- TILEPRO_OPC_SWADD, TILEPRO_OPC_NONE,
- BITFIELD(46, 7) /* index 992 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- CHILD(1121), CHILD(1121), CHILD(1121), CHILD(1121), CHILD(1124),
- CHILD(1124), CHILD(1124), CHILD(1124), CHILD(1127), CHILD(1127),
- CHILD(1127), CHILD(1127), CHILD(1130), CHILD(1130), CHILD(1130),
- CHILD(1130), CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1133),
- CHILD(1136), CHILD(1136), CHILD(1136), CHILD(1136), CHILD(1139),
- CHILD(1139), CHILD(1139), CHILD(1139), CHILD(1142), CHILD(1142),
- CHILD(1142), CHILD(1142), CHILD(1145), CHILD(1145), CHILD(1145),
- CHILD(1145), CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1148),
- CHILD(1151), CHILD(1242), CHILD(1290), CHILD(1323), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1121 */,
- TILEPRO_OPC_RLI, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1124 */,
- TILEPRO_OPC_SHLIB, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1127 */,
- TILEPRO_OPC_SHLIH, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1130 */,
- TILEPRO_OPC_SHLI, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1133 */,
- TILEPRO_OPC_SHRIB, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1136 */,
- TILEPRO_OPC_SHRIH, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1139 */,
- TILEPRO_OPC_SHRI, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1142 */,
- TILEPRO_OPC_SRAIB, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1145 */,
- TILEPRO_OPC_SRAIH, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1148 */,
- TILEPRO_OPC_SRAI, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 1151 */,
- TILEPRO_OPC_NONE, CHILD(1160), CHILD(1163), CHILD(1166), CHILD(1169),
- CHILD(1172), CHILD(1175), CHILD(1178),
- BITFIELD(53, 1) /* index 1160 */,
- TILEPRO_OPC_DRAIN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1163 */,
- TILEPRO_OPC_DTLBPR, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1166 */,
- TILEPRO_OPC_FINV, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1169 */,
- TILEPRO_OPC_FLUSH, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1172 */,
- TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1175 */,
- TILEPRO_OPC_ICOH, TILEPRO_OPC_NONE,
- BITFIELD(31, 2) /* index 1178 */,
- CHILD(1183), CHILD(1211), CHILD(1239), CHILD(1239),
- BITFIELD(53, 1) /* index 1183 */,
- CHILD(1186), TILEPRO_OPC_NONE,
- BITFIELD(33, 2) /* index 1186 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, CHILD(1191),
- BITFIELD(35, 2) /* index 1191 */,
- TILEPRO_OPC_ILL, CHILD(1196), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(37, 2) /* index 1196 */,
- TILEPRO_OPC_ILL, CHILD(1201), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(39, 2) /* index 1201 */,
- TILEPRO_OPC_ILL, CHILD(1206), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(41, 2) /* index 1206 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_BPT, TILEPRO_OPC_ILL,
- BITFIELD(53, 1) /* index 1211 */,
- CHILD(1214), TILEPRO_OPC_NONE,
- BITFIELD(33, 2) /* index 1214 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, CHILD(1219),
- BITFIELD(35, 2) /* index 1219 */,
- TILEPRO_OPC_ILL, CHILD(1224), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(37, 2) /* index 1224 */,
- TILEPRO_OPC_ILL, CHILD(1229), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(39, 2) /* index 1229 */,
- TILEPRO_OPC_ILL, CHILD(1234), TILEPRO_OPC_ILL, TILEPRO_OPC_ILL,
- BITFIELD(41, 2) /* index 1234 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_ILL, TILEPRO_OPC_RAISE, TILEPRO_OPC_ILL,
- BITFIELD(53, 1) /* index 1239 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 1242 */,
- CHILD(1251), CHILD(1254), CHILD(1257), CHILD(1275), CHILD(1278),
- CHILD(1281), CHILD(1284), CHILD(1287),
- BITFIELD(53, 1) /* index 1251 */,
- TILEPRO_OPC_INV, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1254 */,
- TILEPRO_OPC_IRET, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1257 */,
- CHILD(1260), TILEPRO_OPC_NONE,
- BITFIELD(31, 2) /* index 1260 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(1265),
- BITFIELD(33, 2) /* index 1265 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(1270),
- BITFIELD(35, 2) /* index 1270 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_PREFETCH,
- BITFIELD(53, 1) /* index 1275 */,
- TILEPRO_OPC_LB_U, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1278 */,
- TILEPRO_OPC_LH, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1281 */,
- TILEPRO_OPC_LH_U, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1284 */,
- TILEPRO_OPC_LW, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1287 */,
- TILEPRO_OPC_MF, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 1290 */,
- CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311),
- CHILD(1314), CHILD(1317), CHILD(1320),
- BITFIELD(53, 1) /* index 1299 */,
- TILEPRO_OPC_NAP, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1302 */,
- TILEPRO_OPC_NOP, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1305 */,
- TILEPRO_OPC_SWINT0, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1308 */,
- TILEPRO_OPC_SWINT1, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1311 */,
- TILEPRO_OPC_SWINT2, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1314 */,
- TILEPRO_OPC_SWINT3, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1317 */,
- TILEPRO_OPC_TNS, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1320 */,
- TILEPRO_OPC_WH64, TILEPRO_OPC_NONE,
- BITFIELD(43, 2) /* index 1323 */,
- CHILD(1328), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(45, 1) /* index 1328 */,
- CHILD(1331), TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1331 */,
- TILEPRO_OPC_LW_NA, TILEPRO_OPC_NONE,
- BITFIELD(46, 7) /* index 1334 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- CHILD(1463), CHILD(1463), CHILD(1463), CHILD(1463), CHILD(1466),
- CHILD(1466), CHILD(1466), CHILD(1466), CHILD(1469), CHILD(1469),
- CHILD(1469), CHILD(1469), CHILD(1472), CHILD(1472), CHILD(1472),
- CHILD(1472), CHILD(1475), CHILD(1475), CHILD(1475), CHILD(1475),
- CHILD(1478), CHILD(1478), CHILD(1478), CHILD(1478), CHILD(1481),
- CHILD(1481), CHILD(1481), CHILD(1481), CHILD(1484), CHILD(1484),
- CHILD(1484), CHILD(1484), CHILD(1487), CHILD(1487), CHILD(1487),
- CHILD(1487), CHILD(1490), CHILD(1490), CHILD(1490), CHILD(1490),
- CHILD(1151), CHILD(1493), CHILD(1517), CHILD(1529), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1463 */,
- TILEPRO_OPC_RLI_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1466 */,
- TILEPRO_OPC_SHLIB_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1469 */,
- TILEPRO_OPC_SHLIH_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1472 */,
- TILEPRO_OPC_SHLI_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1475 */,
- TILEPRO_OPC_SHRIB_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1478 */,
- TILEPRO_OPC_SHRIH_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1481 */,
- TILEPRO_OPC_SHRI_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1484 */,
- TILEPRO_OPC_SRAIB_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1487 */,
- TILEPRO_OPC_SRAIH_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1490 */,
- TILEPRO_OPC_SRAI_SN, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 1493 */,
- CHILD(1251), CHILD(1254), CHILD(1502), CHILD(1505), CHILD(1508),
- CHILD(1511), CHILD(1514), CHILD(1287),
- BITFIELD(53, 1) /* index 1502 */,
- TILEPRO_OPC_LB_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1505 */,
- TILEPRO_OPC_LB_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1508 */,
- TILEPRO_OPC_LH_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1511 */,
- TILEPRO_OPC_LH_U_SN, TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1514 */,
- TILEPRO_OPC_LW_SN, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 1517 */,
- CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311),
- CHILD(1314), CHILD(1526), CHILD(1320),
- BITFIELD(53, 1) /* index 1526 */,
- TILEPRO_OPC_TNS_SN, TILEPRO_OPC_NONE,
- BITFIELD(43, 2) /* index 1529 */,
- CHILD(1534), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(45, 1) /* index 1534 */,
- CHILD(1537), TILEPRO_OPC_NONE,
- BITFIELD(53, 1) /* index 1537 */,
- TILEPRO_OPC_LW_NA_SN, TILEPRO_OPC_NONE,
-};
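
(Editor's note on the table just closed: the multi-level CHILD chains, e.g. indices 546/551/556 above, and 674/679/684 for the _SN forms, are not extra opcode space; they disambiguate pseudo-instruction aliases. Each BITFIELD step tests two more bits of the 6-bit SrcB field, and only when all six bits are set, i.e. SrcB names register 63, the zero register, does the walk end on TILEPRO_OPC_MOVE rather than TILEPRO_OPC_OR. A flattened sketch of that test, assuming SrcB for this pipe sits at bits 43..48 as the BITFIELD arguments imply:

	/* "or d, a, zero" is presented as "move d, a". */
	unsigned int srcb = (unsigned int)(bits >> 43) & 63;
	unsigned short opc = (srcb == 63) ? TILEPRO_OPC_MOVE : TILEPRO_OPC_OR;

The ADDLI/MOVELI and ANDI/INFO chains later in the table follow the same pattern on other operand fields.)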
-
-static const unsigned short decode_Y0_fsm[168] =
-{
- BITFIELD(27, 4) /* index 0 */,
- TILEPRO_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52),
- CHILD(57), CHILD(62), CHILD(67), TILEPRO_OPC_ADDI, CHILD(72), CHILD(102),
- TILEPRO_OPC_SEQI, CHILD(117), TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U,
- BITFIELD(18, 2) /* index 17 */,
- TILEPRO_OPC_ADD, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_SUB,
- BITFIELD(18, 2) /* index 22 */,
- TILEPRO_OPC_MNZ, TILEPRO_OPC_MVNZ, TILEPRO_OPC_MVZ, TILEPRO_OPC_MZ,
- BITFIELD(18, 2) /* index 27 */,
- TILEPRO_OPC_AND, TILEPRO_OPC_NOR, CHILD(32), TILEPRO_OPC_XOR,
- BITFIELD(12, 2) /* index 32 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(37),
- BITFIELD(14, 2) /* index 37 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(42),
- BITFIELD(16, 2) /* index 42 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE,
- BITFIELD(18, 2) /* index 47 */,
- TILEPRO_OPC_RL, TILEPRO_OPC_SHL, TILEPRO_OPC_SHR, TILEPRO_OPC_SRA,
- BITFIELD(18, 2) /* index 52 */,
- TILEPRO_OPC_SLTE, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U,
- BITFIELD(18, 2) /* index 57 */,
- TILEPRO_OPC_MULHLSA_UU, TILEPRO_OPC_S3A, TILEPRO_OPC_SEQ, TILEPRO_OPC_SNE,
- BITFIELD(18, 2) /* index 62 */,
- TILEPRO_OPC_MULHH_SS, TILEPRO_OPC_MULHH_UU, TILEPRO_OPC_MULLL_SS,
- TILEPRO_OPC_MULLL_UU,
- BITFIELD(18, 2) /* index 67 */,
- TILEPRO_OPC_MULHHA_SS, TILEPRO_OPC_MULHHA_UU, TILEPRO_OPC_MULLLA_SS,
- TILEPRO_OPC_MULLLA_UU,
- BITFIELD(0, 2) /* index 72 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(77),
- BITFIELD(2, 2) /* index 77 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(82),
- BITFIELD(4, 2) /* index 82 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(87),
- BITFIELD(6, 2) /* index 87 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(92),
- BITFIELD(8, 2) /* index 92 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(97),
- BITFIELD(10, 2) /* index 97 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO,
- BITFIELD(6, 2) /* index 102 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(107),
- BITFIELD(8, 2) /* index 107 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(112),
- BITFIELD(10, 2) /* index 112 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI,
- BITFIELD(15, 5) /* index 117 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_RLI, TILEPRO_OPC_RLI, TILEPRO_OPC_RLI, TILEPRO_OPC_RLI,
- TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHLI,
- TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI, TILEPRO_OPC_SHRI,
- TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI, TILEPRO_OPC_SRAI,
- CHILD(150), CHILD(159), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(12, 3) /* index 150 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_BITX, TILEPRO_OPC_BYTEX, TILEPRO_OPC_CLZ,
- TILEPRO_OPC_CTZ, TILEPRO_OPC_FNOP, TILEPRO_OPC_NOP, TILEPRO_OPC_PCNT,
- BITFIELD(12, 3) /* index 159 */,
- TILEPRO_OPC_TBLIDXB0, TILEPRO_OPC_TBLIDXB1, TILEPRO_OPC_TBLIDXB2,
- TILEPRO_OPC_TBLIDXB3, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE,
-};
-
-static const unsigned short decode_Y1_fsm[140] =
-{
- BITFIELD(59, 4) /* index 0 */,
- TILEPRO_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52),
- CHILD(57), TILEPRO_OPC_ADDI, CHILD(62), CHILD(92), TILEPRO_OPC_SEQI,
- CHILD(107), TILEPRO_OPC_SLTI, TILEPRO_OPC_SLTI_U, TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE,
- BITFIELD(49, 2) /* index 17 */,
- TILEPRO_OPC_ADD, TILEPRO_OPC_S1A, TILEPRO_OPC_S2A, TILEPRO_OPC_SUB,
- BITFIELD(49, 2) /* index 22 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_MNZ, TILEPRO_OPC_MZ, TILEPRO_OPC_NONE,
- BITFIELD(49, 2) /* index 27 */,
- TILEPRO_OPC_AND, TILEPRO_OPC_NOR, CHILD(32), TILEPRO_OPC_XOR,
- BITFIELD(43, 2) /* index 32 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(37),
- BITFIELD(45, 2) /* index 37 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, CHILD(42),
- BITFIELD(47, 2) /* index 42 */,
- TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_OR, TILEPRO_OPC_MOVE,
- BITFIELD(49, 2) /* index 47 */,
- TILEPRO_OPC_RL, TILEPRO_OPC_SHL, TILEPRO_OPC_SHR, TILEPRO_OPC_SRA,
- BITFIELD(49, 2) /* index 52 */,
- TILEPRO_OPC_SLTE, TILEPRO_OPC_SLTE_U, TILEPRO_OPC_SLT, TILEPRO_OPC_SLT_U,
- BITFIELD(49, 2) /* index 57 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_S3A, TILEPRO_OPC_SEQ, TILEPRO_OPC_SNE,
- BITFIELD(31, 2) /* index 62 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(67),
- BITFIELD(33, 2) /* index 67 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(72),
- BITFIELD(35, 2) /* index 72 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(77),
- BITFIELD(37, 2) /* index 77 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(82),
- BITFIELD(39, 2) /* index 82 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, CHILD(87),
- BITFIELD(41, 2) /* index 87 */,
- TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_ANDI, TILEPRO_OPC_INFO,
- BITFIELD(37, 2) /* index 92 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(97),
- BITFIELD(39, 2) /* index 97 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, CHILD(102),
- BITFIELD(41, 2) /* index 102 */,
- TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_ORI, TILEPRO_OPC_MOVEI,
- BITFIELD(48, 3) /* index 107 */,
- TILEPRO_OPC_NONE, TILEPRO_OPC_RLI, TILEPRO_OPC_SHLI, TILEPRO_OPC_SHRI,
- TILEPRO_OPC_SRAI, CHILD(116), TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(43, 3) /* index 116 */,
- TILEPRO_OPC_NONE, CHILD(125), CHILD(130), CHILD(135), TILEPRO_OPC_NONE,
- TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(46, 2) /* index 125 */,
- TILEPRO_OPC_FNOP, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(46, 2) /* index 130 */,
- TILEPRO_OPC_ILL, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
- BITFIELD(46, 2) /* index 135 */,
- TILEPRO_OPC_NOP, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE, TILEPRO_OPC_NONE,
-};
-
-static const unsigned short decode_Y2_fsm[24] =
-{
- BITFIELD(56, 3) /* index 0 */,
- CHILD(9), TILEPRO_OPC_LB_U, TILEPRO_OPC_LH, TILEPRO_OPC_LH_U,
- TILEPRO_OPC_LW, TILEPRO_OPC_SB, TILEPRO_OPC_SH, TILEPRO_OPC_SW,
- BITFIELD(20, 2) /* index 9 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(14),
- BITFIELD(22, 2) /* index 14 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, CHILD(19),
- BITFIELD(24, 2) /* index 19 */,
- TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_LB, TILEPRO_OPC_PREFETCH,
-};
-
-#undef BITFIELD
-#undef CHILD
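
(Editor's note, for reference while reading the tables above: the two macros retired here are defined near the top of this file, outside the excerpt. Reconstructed from the way find_opcode() below consumes the tables, so treat the exact spelling as an assumption, they pack a bit-test descriptor and a biased child index:

	/* Test `size` bits starting at bit `start`: the low 6 bits hold
	 * the shift amount, the bits above hold the extraction mask. */
	#define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))

	/* Child pointers are biased past the largest opcode number, so a
	 * table entry <= TILEPRO_OPC_NONE is a leaf opcode and anything
	 * larger is the index of another BITFIELD node. */
	#define CHILD(array_index) (TILEPRO_OPC_NONE + (array_index))
)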
-const unsigned short * const
-tilepro_bundle_decoder_fsms[TILEPRO_NUM_PIPELINE_ENCODINGS] =
-{
- decode_X0_fsm,
- decode_X1_fsm,
- decode_Y0_fsm,
- decode_Y1_fsm,
- decode_Y2_fsm
-};
-const struct tilepro_operand tilepro_operands[43] =
-{
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_X0),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_X0, get_Imm8_X0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_X1),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_X1, get_Imm8_X1
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_Y0),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_Y0, get_Imm8_Y0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM8_Y1),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_Y1, get_Imm8_Y1
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM16_X0),
- 16, 1, 0, 0, 0, 0,
- create_Imm16_X0, get_Imm16_X0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_IMM16_X1),
- 16, 1, 0, 0, 0, 0,
- create_Imm16_X1, get_Imm16_X1
- },
- {
- TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(TILEPRO_JOFFLONG_X1),
- 29, 1, 0, 0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
- create_JOffLong_X1, get_JOffLong_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_X0, get_Dest_X0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_X0, get_SrcA_X0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_X1, get_Dest_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_X1, get_SrcA_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_Y0, get_Dest_Y0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y0, get_SrcA_Y0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_Y1, get_Dest_Y1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y1, get_SrcA_Y1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y2, get_SrcA_Y2
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_X0, get_SrcB_X0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_X1, get_SrcB_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_Y0, get_SrcB_Y0
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_Y1, get_SrcB_Y1
- },
- {
- TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(TILEPRO_BROFF_X1),
- 17, 1, 0, 0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
- create_BrOff_X1, get_BrOff_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_Dest_X0, get_Dest_X0
- },
- {
- TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(NONE),
- 28, 1, 0, 0, 1, TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
- create_JOff_X1, get_JOff_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_SrcBDest_Y2, get_SrcBDest_Y2
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_SrcA_X1, get_SrcA_X1
- },
- {
- TILEPRO_OP_TYPE_SPR, BFD_RELOC(TILEPRO_MF_IMM15_X1),
- 15, 0, 0, 0, 0, 0,
- create_MF_Imm15_X1, get_MF_Imm15_X1
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMSTART_X0),
- 5, 0, 0, 0, 0, 0,
- create_MMStart_X0, get_MMStart_X0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMEND_X0),
- 5, 0, 0, 0, 0, 0,
- create_MMEnd_X0, get_MMEnd_X0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMSTART_X1),
- 5, 0, 0, 0, 0, 0,
- create_MMStart_X1, get_MMStart_X1
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_MMEND_X1),
- 5, 0, 0, 0, 0, 0,
- create_MMEnd_X1, get_MMEnd_X1
- },
- {
- TILEPRO_OP_TYPE_SPR, BFD_RELOC(TILEPRO_MT_IMM15_X1),
- 15, 0, 0, 0, 0, 0,
- create_MT_Imm15_X1, get_MT_Imm15_X1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_Dest_Y0, get_Dest_Y0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_X0),
- 5, 0, 0, 0, 0, 0,
- create_ShAmt_X0, get_ShAmt_X0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_X1),
- 5, 0, 0, 0, 0, 0,
- create_ShAmt_X1, get_ShAmt_X1
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_Y0),
- 5, 0, 0, 0, 0, 0,
- create_ShAmt_Y0, get_ShAmt_Y0
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_SHAMT_Y1),
- 5, 0, 0, 0, 0, 0,
- create_ShAmt_Y1, get_ShAmt_Y1
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcBDest_Y2, get_SrcBDest_Y2
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEPRO_DEST_IMM8_X1),
- 8, 1, 0, 0, 0, 0,
- create_Dest_Imm8_X1, get_Dest_Imm8_X1
- },
- {
- TILEPRO_OP_TYPE_ADDRESS, BFD_RELOC(NONE),
- 10, 1, 0, 0, 1, TILEPRO_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES,
- create_BrOff_SN, get_BrOff_SN
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE),
- 8, 0, 0, 0, 0, 0,
- create_Imm8_SN, get_Imm8_SN
- },
- {
- TILEPRO_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_SN, get_Imm8_SN
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 2, 0, 0, 1, 0, 0,
- create_Dest_SN, get_Dest_SN
- },
- {
- TILEPRO_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 2, 0, 1, 0, 0, 0,
- create_Src_SN, get_Src_SN
- }
-};
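
(Editor's note on reading the rows above: after the operand type and relocation come six small integers; judging from how parse_insn_tilepro() below consumes them, they are the field width in bits, an is-signed flag, source- and destination-register flags, a PC-relative flag, and a right-shift scale amount, followed by the encode/decode function pair. The field names in this sketch are assumptions based on that usage; only ->extract, ->is_signed, ->num_bits, and ->type are visible in this excerpt:

	/* Operand 8 above is SrcA_X0: a 6-bit unsigned register field. */
	const struct tilepro_operand *srca = &tilepro_operands[8];
	unsigned int reg = srca->extract(bits);	/* calls get_SrcA_X0 */
)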
-
-
-
-
-/* Given a set of bundle bits and a specific pipe, returns which
- * instruction the bundle contains in that pipe.
- */
-const struct tilepro_opcode *
-find_opcode(tilepro_bundle_bits bits, tilepro_pipeline pipe)
-{
- const unsigned short *table = tilepro_bundle_decoder_fsms[pipe];
- int index = 0;
-
- while (1)
- {
- unsigned short bitspec = table[index];
- unsigned int bitfield =
- ((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6);
-
- unsigned short next = table[index + 1 + bitfield];
- if (next <= TILEPRO_OPC_NONE)
- return &tilepro_opcodes[next];
-
- index = next - TILEPRO_OPC_NONE;
- }
-}
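
(Editor's note, a worked example of the walk using decode_Y2_fsm above; this assumes tilepro_bundle_bits is a 64-bit integer type, as shifts by up to bit 59 require:

	/* A bundle whose bits [58:56] equal 4 carries "lw" in pipe Y2. */
	tilepro_bundle_bits bundle = (tilepro_bundle_bits)4 << 56;
	const struct tilepro_opcode *opc =
		find_opcode(bundle, TILEPRO_PIPELINE_Y2);

	/* Trace: index 0 holds BITFIELD(56, 3), so bitfield == 4; entry
	 * table[0 + 1 + 4] is TILEPRO_OPC_LW, which is <= TILEPRO_OPC_NONE,
	 * so the loop returns &tilepro_opcodes[TILEPRO_OPC_LW] after one
	 * step.  Had the field been 0, CHILD(9) would have sent the walk
	 * to the LB-versus-PREFETCH subtable at index 9. */
)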
-
-
-int
-parse_insn_tilepro(tilepro_bundle_bits bits,
- unsigned int pc,
- struct tilepro_decoded_instruction
- decoded[TILEPRO_MAX_INSTRUCTIONS_PER_BUNDLE])
-{
- int num_instructions = 0;
- int pipe;
-
- int min_pipe, max_pipe;
- if ((bits & TILEPRO_BUNDLE_Y_ENCODING_MASK) == 0)
- {
- min_pipe = TILEPRO_PIPELINE_X0;
- max_pipe = TILEPRO_PIPELINE_X1;
- }
- else
- {
- min_pipe = TILEPRO_PIPELINE_Y0;
- max_pipe = TILEPRO_PIPELINE_Y2;
- }
-
- /* For each pipe, find an instruction that fits. */
- for (pipe = min_pipe; pipe <= max_pipe; pipe++)
- {
- const struct tilepro_opcode *opc;
- struct tilepro_decoded_instruction *d;
- int i;
-
- d = &decoded[num_instructions++];
- opc = find_opcode (bits, (tilepro_pipeline)pipe);
- d->opcode = opc;
-
- /* Decode each operand, sign extending, etc. as appropriate. */
- for (i = 0; i < opc->num_operands; i++)
- {
- const struct tilepro_operand *op =
- &tilepro_operands[opc->operands[pipe][i]];
- int opval = op->extract (bits);
- if (op->is_signed)
- {
- /* Sign-extend the operand. */
- int shift = (int)((sizeof(int) * 8) - op->num_bits);
- opval = (opval << shift) >> shift;
- }
-
- /* Adjust PC-relative scaled branch offsets. */
- if (op->type == TILEPRO_OP_TYPE_ADDRESS)
- {
- opval *= TILEPRO_BUNDLE_SIZE_IN_BYTES;
- opval += (int)pc;
- }
-
- /* Record the final value. */
- d->operands[i] = op;
- d->operand_values[i] = opval;
- }
- }
-
- return num_instructions;
-}
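
(Editor's note: the operand sign extension above is the usual shift pair. Pulled out as a standalone helper it reads as below; like the original, it leans on the compiler's behaviour for signed shifts, which gcc defines as arithmetic:

	/* Sign-extend the low num_bits bits of val to a full int. */
	static int sign_extend(int val, int num_bits)
	{
		int shift = (int)(sizeof(int) * 8 - num_bits);
		return (val << shift) >> shift;
	}

So for a 17-bit branch-offset field holding 0x1ffff, sign_extend(0x1ffff, 17) yields -1, which parse_insn_tilepro() then scales by TILEPRO_BUNDLE_SIZE_IN_BYTES and rebases on the pc.)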
diff --git a/arch/tile/kernel/tile-desc_64.c b/arch/tile/kernel/tile-desc_64.c
deleted file mode 100644
index 65b5f8aca706..000000000000
--- a/arch/tile/kernel/tile-desc_64.c
+++ /dev/null
@@ -1,2218 +0,0 @@
-/* TILE-Gx opcode information.
- *
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- */
-
-/* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */
-#define BFD_RELOC(x) -1
-
-/* Special registers. */
-#define TREG_LR 55
-#define TREG_SN 56
-#define TREG_ZERO 63
-
-#include <linux/stddef.h>
-#include <asm/tile-desc.h>
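
(Editor's note on the table below: each initializer row gives the mnemonic, the opcode enum, a pipe bitmask, the operand count, the register the instruction implicitly writes, a can-bundle flag, and then one operand-index list per pipeline encoding (X0, X1, Y0, Y1, Y2). The mask's bit assignments are an inference from which operand columns are populated: bit 0 = X0, bit 1 = X1, bit 2 = Y0, bit 3 = Y1, bit 4 = Y2, so 0xf means all four X/Y compute slots and 0x12 means X1 or Y2. A sketch of reading one row, field names assumed and the enum-indexing convention carried over from the tilepro decoder above:

	/* "jal" (mask 0x2) issues only in X1 and implicitly writes the
	 * link register; its one operand, index 25, is a jump offset. */
	const struct tilegx_opcode *jal = &tilegx_opcodes[TILEGX_OPC_JAL];
	int x1_only = (jal->pipes == 0x2);			/* true */
	int writes_lr =
		(jal->implicitly_written_register == TREG_LR);	/* 55 */
)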
-
-const struct tilegx_opcode tilegx_opcodes[334] =
-{
- { "bpt", TILEGX_OPC_BPT, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "info", TILEGX_OPC_INFO, 0xf, 1, TREG_ZERO, 1,
- { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } },
- },
- { "infol", TILEGX_OPC_INFOL, 0x3, 1, TREG_ZERO, 1,
- { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "move", TILEGX_OPC_MOVE, 0xf, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 8, 9 }, { 10, 11 }, { 12, 13 }, { 0, } },
- },
- { "movei", TILEGX_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1,
- { { 6, 0 }, { 8, 1 }, { 10, 2 }, { 12, 3 }, { 0, } },
- },
- { "moveli", TILEGX_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1,
- { { 6, 4 }, { 8, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch", TILEGX_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_add_l1", TILEGX_OPC_PREFETCH_ADD_L1, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_add_l1_fault", TILEGX_OPC_PREFETCH_ADD_L1_FAULT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_add_l2", TILEGX_OPC_PREFETCH_ADD_L2, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_add_l2_fault", TILEGX_OPC_PREFETCH_ADD_L2_FAULT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_add_l3", TILEGX_OPC_PREFETCH_ADD_L3, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_add_l3_fault", TILEGX_OPC_PREFETCH_ADD_L3_FAULT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "prefetch_l1", TILEGX_OPC_PREFETCH_L1, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_l1_fault", TILEGX_OPC_PREFETCH_L1_FAULT, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_l2", TILEGX_OPC_PREFETCH_L2, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_l2_fault", TILEGX_OPC_PREFETCH_L2_FAULT, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_l3", TILEGX_OPC_PREFETCH_L3, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "prefetch_l3_fault", TILEGX_OPC_PREFETCH_L3_FAULT, 0x12, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
- },
- { "raise", TILEGX_OPC_RAISE, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "add", TILEGX_OPC_ADD, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "addi", TILEGX_OPC_ADDI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
- },
- { "addli", TILEGX_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "addx", TILEGX_OPC_ADDX, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "addxi", TILEGX_OPC_ADDXI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
- },
- { "addxli", TILEGX_OPC_ADDXLI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "addxsc", TILEGX_OPC_ADDXSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "and", TILEGX_OPC_AND, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "andi", TILEGX_OPC_ANDI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
- },
- { "beqz", TILEGX_OPC_BEQZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "beqzt", TILEGX_OPC_BEQZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bfexts", TILEGX_OPC_BFEXTS, 0x1, 4, TREG_ZERO, 1,
- { { 6, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "bfextu", TILEGX_OPC_BFEXTU, 0x1, 4, TREG_ZERO, 1,
- { { 6, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "bfins", TILEGX_OPC_BFINS, 0x1, 4, TREG_ZERO, 1,
- { { 23, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "bgez", TILEGX_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgezt", TILEGX_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgtz", TILEGX_OPC_BGTZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bgtzt", TILEGX_OPC_BGTZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blbc", TILEGX_OPC_BLBC, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blbct", TILEGX_OPC_BLBCT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blbs", TILEGX_OPC_BLBS, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blbst", TILEGX_OPC_BLBST, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blez", TILEGX_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "blezt", TILEGX_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bltz", TILEGX_OPC_BLTZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bltzt", TILEGX_OPC_BLTZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnez", TILEGX_OPC_BNEZ, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "bnezt", TILEGX_OPC_BNEZT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
- },
- { "clz", TILEGX_OPC_CLZ, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "cmoveqz", TILEGX_OPC_CMOVEQZ, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "cmovnez", TILEGX_OPC_CMOVNEZ, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "cmpeq", TILEGX_OPC_CMPEQ, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmpeqi", TILEGX_OPC_CMPEQI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
- },
- { "cmpexch", TILEGX_OPC_CMPEXCH, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "cmpexch4", TILEGX_OPC_CMPEXCH4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "cmples", TILEGX_OPC_CMPLES, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmpleu", TILEGX_OPC_CMPLEU, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmplts", TILEGX_OPC_CMPLTS, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmpltsi", TILEGX_OPC_CMPLTSI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
- },
- { "cmpltu", TILEGX_OPC_CMPLTU, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmpltui", TILEGX_OPC_CMPLTUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "cmpne", TILEGX_OPC_CMPNE, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "cmul", TILEGX_OPC_CMUL, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmula", TILEGX_OPC_CMULA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmulaf", TILEGX_OPC_CMULAF, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmulf", TILEGX_OPC_CMULF, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmulfr", TILEGX_OPC_CMULFR, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmulh", TILEGX_OPC_CMULH, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "cmulhr", TILEGX_OPC_CMULHR, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_32", TILEGX_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "crc32_8", TILEGX_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "ctz", TILEGX_OPC_CTZ, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "dblalign", TILEGX_OPC_DBLALIGN, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "dblalign2", TILEGX_OPC_DBLALIGN2, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "dblalign4", TILEGX_OPC_DBLALIGN4, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "dblalign6", TILEGX_OPC_DBLALIGN6, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "drain", TILEGX_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "dtlbpr", TILEGX_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "exch", TILEGX_OPC_EXCH, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "exch4", TILEGX_OPC_EXCH4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_add_flags", TILEGX_OPC_FDOUBLE_ADD_FLAGS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_addsub", TILEGX_OPC_FDOUBLE_ADDSUB, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_mul_flags", TILEGX_OPC_FDOUBLE_MUL_FLAGS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_pack1", TILEGX_OPC_FDOUBLE_PACK1, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_pack2", TILEGX_OPC_FDOUBLE_PACK2, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_sub_flags", TILEGX_OPC_FDOUBLE_SUB_FLAGS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_unpack_max", TILEGX_OPC_FDOUBLE_UNPACK_MAX, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fdouble_unpack_min", TILEGX_OPC_FDOUBLE_UNPACK_MIN, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchadd", TILEGX_OPC_FETCHADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchadd4", TILEGX_OPC_FETCHADD4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchaddgez", TILEGX_OPC_FETCHADDGEZ, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchaddgez4", TILEGX_OPC_FETCHADDGEZ4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchand", TILEGX_OPC_FETCHAND, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchand4", TILEGX_OPC_FETCHAND4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchor", TILEGX_OPC_FETCHOR, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "fetchor4", TILEGX_OPC_FETCHOR4, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "finv", TILEGX_OPC_FINV, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "flush", TILEGX_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "flushwb", TILEGX_OPC_FLUSHWB, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "fnop", TILEGX_OPC_FNOP, 0xf, 0, TREG_ZERO, 1,
- { { }, { }, { }, { }, { 0, } },
- },
- { "fsingle_add1", TILEGX_OPC_FSINGLE_ADD1, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fsingle_addsub2", TILEGX_OPC_FSINGLE_ADDSUB2, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fsingle_mul1", TILEGX_OPC_FSINGLE_MUL1, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fsingle_mul2", TILEGX_OPC_FSINGLE_MUL2, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fsingle_pack1", TILEGX_OPC_FSINGLE_PACK1, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "fsingle_pack2", TILEGX_OPC_FSINGLE_PACK2, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "fsingle_sub1", TILEGX_OPC_FSINGLE_SUB1, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "icoh", TILEGX_OPC_ICOH, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ill", TILEGX_OPC_ILL, 0xa, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { }, { 0, } },
- },
- { "inv", TILEGX_OPC_INV, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "iret", TILEGX_OPC_IRET, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "j", TILEGX_OPC_J, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } },
- },
- { "jal", TILEGX_OPC_JAL, 0x2, 1, TREG_LR, 1,
- { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } },
- },
- { "jalr", TILEGX_OPC_JALR, 0xa, 1, TREG_LR, 1,
- { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
- },
- { "jalrp", TILEGX_OPC_JALRP, 0xa, 1, TREG_LR, 1,
- { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
- },
- { "jr", TILEGX_OPC_JR, 0xa, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
- },
- { "jrp", TILEGX_OPC_JRP, 0xa, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
- },
- { "ld", TILEGX_OPC_LD, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld1s", TILEGX_OPC_LD1S, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld1s_add", TILEGX_OPC_LD1S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld1u", TILEGX_OPC_LD1U, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld1u_add", TILEGX_OPC_LD1U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld2s", TILEGX_OPC_LD2S, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld2s_add", TILEGX_OPC_LD2S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld2u", TILEGX_OPC_LD2U, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld2u_add", TILEGX_OPC_LD2U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld4s", TILEGX_OPC_LD4S, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld4s_add", TILEGX_OPC_LD4S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld4u", TILEGX_OPC_LD4U, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
- },
- { "ld4u_add", TILEGX_OPC_LD4U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ld_add", TILEGX_OPC_LD_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldna", TILEGX_OPC_LDNA, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldna_add", TILEGX_OPC_LDNA_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt", TILEGX_OPC_LDNT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt1s", TILEGX_OPC_LDNT1S, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt1s_add", TILEGX_OPC_LDNT1S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt1u", TILEGX_OPC_LDNT1U, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt1u_add", TILEGX_OPC_LDNT1U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt2s", TILEGX_OPC_LDNT2S, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt2s_add", TILEGX_OPC_LDNT2S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt2u", TILEGX_OPC_LDNT2U, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt2u_add", TILEGX_OPC_LDNT2U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt4s", TILEGX_OPC_LDNT4S, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt4s_add", TILEGX_OPC_LDNT4S_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt4u", TILEGX_OPC_LDNT4U, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt4u_add", TILEGX_OPC_LDNT4U_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "ldnt_add", TILEGX_OPC_LDNT_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "lnk", TILEGX_OPC_LNK, 0xa, 1, TREG_ZERO, 1,
- { { 0, }, { 8 }, { 0, }, { 12 }, { 0, } },
- },
- { "mf", TILEGX_OPC_MF, 0x2, 0, TREG_ZERO, 1,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "mfspr", TILEGX_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 8, 27 }, { 0, }, { 0, }, { 0, } },
- },
- { "mm", TILEGX_OPC_MM, 0x1, 4, TREG_ZERO, 1,
- { { 23, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mnz", TILEGX_OPC_MNZ, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "mtspr", TILEGX_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 28, 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_hs_hs", TILEGX_OPC_MUL_HS_HS, 0x5, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
- },
- { "mul_hs_hu", TILEGX_OPC_MUL_HS_HU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_hs_ls", TILEGX_OPC_MUL_HS_LS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_hs_lu", TILEGX_OPC_MUL_HS_LU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_hu_hu", TILEGX_OPC_MUL_HU_HU, 0x5, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
- },
- { "mul_hu_ls", TILEGX_OPC_MUL_HU_LS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_hu_lu", TILEGX_OPC_MUL_HU_LU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_ls_ls", TILEGX_OPC_MUL_LS_LS, 0x5, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
- },
- { "mul_ls_lu", TILEGX_OPC_MUL_LS_LU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mul_lu_lu", TILEGX_OPC_MUL_LU_LU, 0x5, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
- },
- { "mula_hs_hs", TILEGX_OPC_MULA_HS_HS, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "mula_hs_hu", TILEGX_OPC_MULA_HS_HU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_hs_ls", TILEGX_OPC_MULA_HS_LS, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_hs_lu", TILEGX_OPC_MULA_HS_LU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_hu_hu", TILEGX_OPC_MULA_HU_HU, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "mula_hu_ls", TILEGX_OPC_MULA_HU_LS, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_hu_lu", TILEGX_OPC_MULA_HU_LU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_ls_ls", TILEGX_OPC_MULA_LS_LS, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "mula_ls_lu", TILEGX_OPC_MULA_LS_LU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "mula_lu_lu", TILEGX_OPC_MULA_LU_LU, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "mulax", TILEGX_OPC_MULAX, 0x5, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
- },
- { "mulx", TILEGX_OPC_MULX, 0x5, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
- },
- { "mz", TILEGX_OPC_MZ, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "nap", TILEGX_OPC_NAP, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "nop", TILEGX_OPC_NOP, 0xf, 0, TREG_ZERO, 1,
- { { }, { }, { }, { }, { 0, } },
- },
- { "nor", TILEGX_OPC_NOR, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "or", TILEGX_OPC_OR, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "ori", TILEGX_OPC_ORI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "pcnt", TILEGX_OPC_PCNT, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "revbits", TILEGX_OPC_REVBITS, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "revbytes", TILEGX_OPC_REVBYTES, 0x5, 2, TREG_ZERO, 1,
- { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
- },
- { "rotl", TILEGX_OPC_ROTL, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "rotli", TILEGX_OPC_ROTLI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
- },
- { "shl", TILEGX_OPC_SHL, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl16insli", TILEGX_OPC_SHL16INSLI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
- },
- { "shl1add", TILEGX_OPC_SHL1ADD, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl1addx", TILEGX_OPC_SHL1ADDX, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl2add", TILEGX_OPC_SHL2ADD, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl2addx", TILEGX_OPC_SHL2ADDX, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl3add", TILEGX_OPC_SHL3ADD, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shl3addx", TILEGX_OPC_SHL3ADDX, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shli", TILEGX_OPC_SHLI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
- },
- { "shlx", TILEGX_OPC_SHLX, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shlxi", TILEGX_OPC_SHLXI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "shrs", TILEGX_OPC_SHRS, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shrsi", TILEGX_OPC_SHRSI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
- },
- { "shru", TILEGX_OPC_SHRU, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "shrui", TILEGX_OPC_SHRUI, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
- },
- { "shrux", TILEGX_OPC_SHRUX, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "shruxi", TILEGX_OPC_SHRUXI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "shufflebytes", TILEGX_OPC_SHUFFLEBYTES, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "st", TILEGX_OPC_ST, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
- },
- { "st1", TILEGX_OPC_ST1, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
- },
- { "st1_add", TILEGX_OPC_ST1_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "st2", TILEGX_OPC_ST2, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
- },
- { "st2_add", TILEGX_OPC_ST2_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "st4", TILEGX_OPC_ST4, 0x12, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
- },
- { "st4_add", TILEGX_OPC_ST4_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "st_add", TILEGX_OPC_ST_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt", TILEGX_OPC_STNT, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt1", TILEGX_OPC_STNT1, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt1_add", TILEGX_OPC_STNT1_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt2", TILEGX_OPC_STNT2, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt2_add", TILEGX_OPC_STNT2_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt4", TILEGX_OPC_STNT4, 0x2, 2, TREG_ZERO, 1,
- { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt4_add", TILEGX_OPC_STNT4_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "stnt_add", TILEGX_OPC_STNT_ADD, 0x2, 3, TREG_ZERO, 1,
- { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
- },
- { "sub", TILEGX_OPC_SUB, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "subx", TILEGX_OPC_SUBX, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "subxsc", TILEGX_OPC_SUBXSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "swint0", TILEGX_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint1", TILEGX_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint2", TILEGX_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "swint3", TILEGX_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0,
- { { 0, }, { }, { 0, }, { 0, }, { 0, } },
- },
- { "tblidxb0", TILEGX_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1,
- { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
- },
- { "tblidxb1", TILEGX_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1,
- { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
- },
- { "tblidxb2", TILEGX_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1,
- { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
- },
- { "tblidxb3", TILEGX_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1,
- { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
- },
- { "v1add", TILEGX_OPC_V1ADD, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1addi", TILEGX_OPC_V1ADDI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1adduc", TILEGX_OPC_V1ADDUC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1adiffu", TILEGX_OPC_V1ADIFFU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1avgu", TILEGX_OPC_V1AVGU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpeq", TILEGX_OPC_V1CMPEQ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpeqi", TILEGX_OPC_V1CMPEQI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmples", TILEGX_OPC_V1CMPLES, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpleu", TILEGX_OPC_V1CMPLEU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmplts", TILEGX_OPC_V1CMPLTS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpltsi", TILEGX_OPC_V1CMPLTSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpltu", TILEGX_OPC_V1CMPLTU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpltui", TILEGX_OPC_V1CMPLTUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1cmpne", TILEGX_OPC_V1CMPNE, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1ddotpu", TILEGX_OPC_V1DDOTPU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1ddotpua", TILEGX_OPC_V1DDOTPUA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1ddotpus", TILEGX_OPC_V1DDOTPUS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1ddotpusa", TILEGX_OPC_V1DDOTPUSA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotp", TILEGX_OPC_V1DOTP, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotpa", TILEGX_OPC_V1DOTPA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotpu", TILEGX_OPC_V1DOTPU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotpua", TILEGX_OPC_V1DOTPUA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotpus", TILEGX_OPC_V1DOTPUS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1dotpusa", TILEGX_OPC_V1DOTPUSA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1int_h", TILEGX_OPC_V1INT_H, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1int_l", TILEGX_OPC_V1INT_L, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1maxu", TILEGX_OPC_V1MAXU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1maxui", TILEGX_OPC_V1MAXUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1minu", TILEGX_OPC_V1MINU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1minui", TILEGX_OPC_V1MINUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1mnz", TILEGX_OPC_V1MNZ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1multu", TILEGX_OPC_V1MULTU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1mulu", TILEGX_OPC_V1MULU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1mulus", TILEGX_OPC_V1MULUS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1mz", TILEGX_OPC_V1MZ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1sadau", TILEGX_OPC_V1SADAU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1sadu", TILEGX_OPC_V1SADU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shl", TILEGX_OPC_V1SHL, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shli", TILEGX_OPC_V1SHLI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shrs", TILEGX_OPC_V1SHRS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shrsi", TILEGX_OPC_V1SHRSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shru", TILEGX_OPC_V1SHRU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1shrui", TILEGX_OPC_V1SHRUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1sub", TILEGX_OPC_V1SUB, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v1subuc", TILEGX_OPC_V1SUBUC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2add", TILEGX_OPC_V2ADD, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2addi", TILEGX_OPC_V2ADDI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2addsc", TILEGX_OPC_V2ADDSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2adiffs", TILEGX_OPC_V2ADIFFS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2avgs", TILEGX_OPC_V2AVGS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpeq", TILEGX_OPC_V2CMPEQ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpeqi", TILEGX_OPC_V2CMPEQI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmples", TILEGX_OPC_V2CMPLES, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpleu", TILEGX_OPC_V2CMPLEU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmplts", TILEGX_OPC_V2CMPLTS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpltsi", TILEGX_OPC_V2CMPLTSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpltu", TILEGX_OPC_V2CMPLTU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpltui", TILEGX_OPC_V2CMPLTUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2cmpne", TILEGX_OPC_V2CMPNE, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2dotp", TILEGX_OPC_V2DOTP, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2dotpa", TILEGX_OPC_V2DOTPA, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2int_h", TILEGX_OPC_V2INT_H, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2int_l", TILEGX_OPC_V2INT_L, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2maxs", TILEGX_OPC_V2MAXS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2maxsi", TILEGX_OPC_V2MAXSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2mins", TILEGX_OPC_V2MINS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2minsi", TILEGX_OPC_V2MINSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2mnz", TILEGX_OPC_V2MNZ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2mulfsc", TILEGX_OPC_V2MULFSC, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2muls", TILEGX_OPC_V2MULS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2mults", TILEGX_OPC_V2MULTS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2mz", TILEGX_OPC_V2MZ, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2packh", TILEGX_OPC_V2PACKH, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2packl", TILEGX_OPC_V2PACKL, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2packuc", TILEGX_OPC_V2PACKUC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2sadas", TILEGX_OPC_V2SADAS, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2sadau", TILEGX_OPC_V2SADAU, 0x1, 3, TREG_ZERO, 1,
- { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2sads", TILEGX_OPC_V2SADS, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2sadu", TILEGX_OPC_V2SADU, 0x1, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shl", TILEGX_OPC_V2SHL, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shli", TILEGX_OPC_V2SHLI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shlsc", TILEGX_OPC_V2SHLSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shrs", TILEGX_OPC_V2SHRS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shrsi", TILEGX_OPC_V2SHRSI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shru", TILEGX_OPC_V2SHRU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2shrui", TILEGX_OPC_V2SHRUI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2sub", TILEGX_OPC_V2SUB, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v2subsc", TILEGX_OPC_V2SUBSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4add", TILEGX_OPC_V4ADD, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4addsc", TILEGX_OPC_V4ADDSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4int_h", TILEGX_OPC_V4INT_H, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4int_l", TILEGX_OPC_V4INT_L, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4packsc", TILEGX_OPC_V4PACKSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4shl", TILEGX_OPC_V4SHL, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4shlsc", TILEGX_OPC_V4SHLSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4shrs", TILEGX_OPC_V4SHRS, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4shru", TILEGX_OPC_V4SHRU, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4sub", TILEGX_OPC_V4SUB, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "v4subsc", TILEGX_OPC_V4SUBSC, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
- },
- { "wh64", TILEGX_OPC_WH64, 0x2, 1, TREG_ZERO, 1,
- { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
- },
- { "xor", TILEGX_OPC_XOR, 0xf, 3, TREG_ZERO, 1,
- { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
- },
- { "xori", TILEGX_OPC_XORI, 0x3, 3, TREG_ZERO, 1,
- { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
- },
- { NULL, TILEGX_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } },
- }
-};
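/*
 * Each row of the generated opcode table above has the same shape:
 * mnemonic string, TILEGX_OPC_* enum value, a bitmask of issue
 * pipelines (reading the tables, bit 0 appears to be X0, bit 1 X1,
 * bit 2 Y0, bit 3 Y1 and bit 4 Y2 -- "mz" with 0xf issues in all four
 * compute slots, while "st" with 0x12 issues only in X1 and Y2), the
 * operand count, the implicitly written register, a can-bundle flag,
 * and one operand-descriptor index list per pipeline encoding, with
 * { 0, } marking encodings the opcode does not support.
 */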
-#define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))
-#define CHILD(array_index) (TILEGX_OPC_NONE + (array_index))
-
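/*
 * The two macros above pack the decode-tree node headers and interior
 * links into the same unsigned short stream as the leaf opcodes.
 * BITFIELD(start, size) keeps the starting bit position in the low six
 * bits and a size-bit extraction mask above them, so BITFIELD(22, 9)
 * encodes 22 | (0x1ff << 6): shift the bundle right by 22, mask with
 * 0x1ff, and use the result to index the 512 entries that follow the
 * header.  CHILD(array_index) biases a node index past TILEGX_OPC_NONE,
 * the sentinel that follows the real opcodes in the mnemonic enum, so
 * any table entry greater than TILEGX_OPC_NONE can be recognized as a
 * link to another node rather than a decoded instruction.
 */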
-static const unsigned short decode_X0_fsm[936] =
-{
- BITFIELD(22, 9) /* index 0 */,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BFEXTS,
- TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTU,
- TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFINS,
- TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_MM,
- TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(528), CHILD(578),
- CHILD(583), CHILD(588), CHILD(593), CHILD(598), TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, CHILD(603), CHILD(620), CHILD(637), CHILD(654), CHILD(671),
- CHILD(703), CHILD(797), CHILD(814), CHILD(831), CHILD(848), CHILD(865),
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, CHILD(889), TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
- BITFIELD(6, 2) /* index 513 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518),
- BITFIELD(8, 2) /* index 518 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523),
- BITFIELD(10, 2) /* index 523 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI,
- BITFIELD(20, 2) /* index 528 */,
- TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548),
- BITFIELD(6, 2) /* index 533 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538),
- BITFIELD(8, 2) /* index 538 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543),
- BITFIELD(10, 2) /* index 543 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
- BITFIELD(0, 2) /* index 548 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553),
- BITFIELD(2, 2) /* index 553 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558),
- BITFIELD(4, 2) /* index 558 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563),
- BITFIELD(6, 2) /* index 563 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568),
- BITFIELD(8, 2) /* index 568 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573),
- BITFIELD(10, 2) /* index 573 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
- BITFIELD(20, 2) /* index 578 */,
- TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, TILEGX_OPC_ORI,
- BITFIELD(20, 2) /* index 583 */,
- TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI, TILEGX_OPC_V1CMPLTSI,
- TILEGX_OPC_V1CMPLTUI,
- BITFIELD(20, 2) /* index 588 */,
- TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI, TILEGX_OPC_V2ADDI,
- TILEGX_OPC_V2CMPEQI,
- BITFIELD(20, 2) /* index 593 */,
- TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI, TILEGX_OPC_V2MAXSI,
- TILEGX_OPC_V2MINSI,
- BITFIELD(20, 2) /* index 598 */,
- TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(18, 4) /* index 603 */,
- TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD,
- TILEGX_OPC_AND, TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_CMPEQ,
- TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
- TILEGX_OPC_CMPNE, TILEGX_OPC_CMULAF, TILEGX_OPC_CMULA, TILEGX_OPC_CMULFR,
- BITFIELD(18, 4) /* index 620 */,
- TILEGX_OPC_CMULF, TILEGX_OPC_CMULHR, TILEGX_OPC_CMULH, TILEGX_OPC_CMUL,
- TILEGX_OPC_CRC32_32, TILEGX_OPC_CRC32_8, TILEGX_OPC_DBLALIGN2,
- TILEGX_OPC_DBLALIGN4, TILEGX_OPC_DBLALIGN6, TILEGX_OPC_DBLALIGN,
- TILEGX_OPC_FDOUBLE_ADDSUB, TILEGX_OPC_FDOUBLE_ADD_FLAGS,
- TILEGX_OPC_FDOUBLE_MUL_FLAGS, TILEGX_OPC_FDOUBLE_PACK1,
- TILEGX_OPC_FDOUBLE_PACK2, TILEGX_OPC_FDOUBLE_SUB_FLAGS,
- BITFIELD(18, 4) /* index 637 */,
- TILEGX_OPC_FDOUBLE_UNPACK_MAX, TILEGX_OPC_FDOUBLE_UNPACK_MIN,
- TILEGX_OPC_FSINGLE_ADD1, TILEGX_OPC_FSINGLE_ADDSUB2,
- TILEGX_OPC_FSINGLE_MUL1, TILEGX_OPC_FSINGLE_MUL2, TILEGX_OPC_FSINGLE_PACK2,
- TILEGX_OPC_FSINGLE_SUB1, TILEGX_OPC_MNZ, TILEGX_OPC_MULAX,
- TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HS_HU, TILEGX_OPC_MULA_HS_LS,
- TILEGX_OPC_MULA_HS_LU, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_HU_LS,
- BITFIELD(18, 4) /* index 654 */,
- TILEGX_OPC_MULA_HU_LU, TILEGX_OPC_MULA_LS_LS, TILEGX_OPC_MULA_LS_LU,
- TILEGX_OPC_MULA_LU_LU, TILEGX_OPC_MULX, TILEGX_OPC_MUL_HS_HS,
- TILEGX_OPC_MUL_HS_HU, TILEGX_OPC_MUL_HS_LS, TILEGX_OPC_MUL_HS_LU,
- TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_HU_LS, TILEGX_OPC_MUL_HU_LU,
- TILEGX_OPC_MUL_LS_LS, TILEGX_OPC_MUL_LS_LU, TILEGX_OPC_MUL_LU_LU,
- TILEGX_OPC_MZ,
- BITFIELD(18, 4) /* index 671 */,
- TILEGX_OPC_NOR, CHILD(688), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL,
- TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_SHUFFLEBYTES,
- TILEGX_OPC_SUBXSC,
- BITFIELD(12, 2) /* index 688 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(693),
- BITFIELD(14, 2) /* index 693 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(698),
- BITFIELD(16, 2) /* index 698 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
- BITFIELD(18, 4) /* index 703 */,
- TILEGX_OPC_SUBX, TILEGX_OPC_SUB, CHILD(720), TILEGX_OPC_V1ADDUC,
- TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADIFFU, TILEGX_OPC_V1AVGU,
- TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU,
- TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE,
- TILEGX_OPC_V1DDOTPUSA, TILEGX_OPC_V1DDOTPUS, TILEGX_OPC_V1DOTPA,
- BITFIELD(12, 4) /* index 720 */,
- TILEGX_OPC_NONE, CHILD(737), CHILD(742), CHILD(747), CHILD(752), CHILD(757),
- CHILD(762), CHILD(767), CHILD(772), CHILD(777), CHILD(782), CHILD(787),
- CHILD(792), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 737 */,
- TILEGX_OPC_CLZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 742 */,
- TILEGX_OPC_CTZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 747 */,
- TILEGX_OPC_FNOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 752 */,
- TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 757 */,
- TILEGX_OPC_NOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 762 */,
- TILEGX_OPC_PCNT, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 767 */,
- TILEGX_OPC_REVBITS, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 772 */,
- TILEGX_OPC_REVBYTES, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 777 */,
- TILEGX_OPC_TBLIDXB0, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 782 */,
- TILEGX_OPC_TBLIDXB1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 787 */,
- TILEGX_OPC_TBLIDXB2, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(16, 2) /* index 792 */,
- TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(18, 4) /* index 797 */,
- TILEGX_OPC_V1DOTPUSA, TILEGX_OPC_V1DOTPUS, TILEGX_OPC_V1DOTP,
- TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1MAXU,
- TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MULTU, TILEGX_OPC_V1MULUS,
- TILEGX_OPC_V1MULU, TILEGX_OPC_V1MZ, TILEGX_OPC_V1SADAU, TILEGX_OPC_V1SADU,
- TILEGX_OPC_V1SHL, TILEGX_OPC_V1SHRS,
- BITFIELD(18, 4) /* index 814 */,
- TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC, TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC,
- TILEGX_OPC_V2ADD, TILEGX_OPC_V2ADIFFS, TILEGX_OPC_V2AVGS,
- TILEGX_OPC_V2CMPEQ, TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU,
- TILEGX_OPC_V2CMPLTS, TILEGX_OPC_V2CMPLTU, TILEGX_OPC_V2CMPNE,
- TILEGX_OPC_V2DOTPA, TILEGX_OPC_V2DOTP, TILEGX_OPC_V2INT_H,
- BITFIELD(18, 4) /* index 831 */,
- TILEGX_OPC_V2INT_L, TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ,
- TILEGX_OPC_V2MULFSC, TILEGX_OPC_V2MULS, TILEGX_OPC_V2MULTS, TILEGX_OPC_V2MZ,
- TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC,
- TILEGX_OPC_V2SADAS, TILEGX_OPC_V2SADAU, TILEGX_OPC_V2SADS,
- TILEGX_OPC_V2SADU, TILEGX_OPC_V2SHLSC,
- BITFIELD(18, 4) /* index 848 */,
- TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU, TILEGX_OPC_V2SUBSC,
- TILEGX_OPC_V2SUB, TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H,
- TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC,
- TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC,
- TILEGX_OPC_V4SUB,
- BITFIELD(18, 3) /* index 865 */,
- CHILD(874), CHILD(877), CHILD(880), CHILD(883), CHILD(886), TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(21, 1) /* index 874 */,
- TILEGX_OPC_XOR, TILEGX_OPC_NONE,
- BITFIELD(21, 1) /* index 877 */,
- TILEGX_OPC_V1DDOTPUA, TILEGX_OPC_NONE,
- BITFIELD(21, 1) /* index 880 */,
- TILEGX_OPC_V1DDOTPU, TILEGX_OPC_NONE,
- BITFIELD(21, 1) /* index 883 */,
- TILEGX_OPC_V1DOTPUA, TILEGX_OPC_NONE,
- BITFIELD(21, 1) /* index 886 */,
- TILEGX_OPC_V1DOTPU, TILEGX_OPC_NONE,
- BITFIELD(18, 4) /* index 889 */,
- TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI,
- TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI,
- TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI,
- TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE,
- BITFIELD(0, 2) /* index 906 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(911),
- BITFIELD(2, 2) /* index 911 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(916),
- BITFIELD(4, 2) /* index 916 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(921),
- BITFIELD(6, 2) /* index 921 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(926),
- BITFIELD(8, 2) /* index 926 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(931),
- BITFIELD(10, 2) /* index 931 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- TILEGX_OPC_INFOL,
-};
-
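/*
 * Each FSM above is a flat decision tree: a node is a BITFIELD header
 * followed by 2^size entries, each either a leaf opcode or a CHILD link
 * back into the same array.  A minimal sketch of the walk, assuming a
 * 64-bit bundle word (the file's actual lookup helper is outside this
 * hunk, so the names here are illustrative):
 */
static int sketch_decode(unsigned long long bundle, const unsigned short *fsm)
{
	int index = 0;	/* the root node of every table sits at index 0 */

	for (;;) {
		unsigned short bitspec = fsm[index];
		/* Low 6 bits: shift amount; remaining bits: mask. */
		unsigned int field = (unsigned int)(bundle >> (bitspec & 63))
				     & (bitspec >> 6);
		unsigned short next = fsm[index + 1 + field];

		if (next <= TILEGX_OPC_NONE)
			return next;			/* leaf opcode */
		index = next - TILEGX_OPC_NONE;		/* CHILD() link */
	}
}
/*
 * Walking decode_X0_fsm this way, a bundle whose bits [30:22] fall in
 * the 64..127 range lands on CHILD(513) and is then refined through the
 * nested ADDLI/MOVELI nodes at indexes 513, 518 and 523.
 */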
-static const unsigned short decode_X1_fsm[1206] =
-{
- BITFIELD(53, 9) /* index 0 */,
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
- CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
- TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BEQZT,
- TILEGX_OPC_BEQZT, TILEGX_OPC_BEQZ, TILEGX_OPC_BEQZ, TILEGX_OPC_BGEZT,
- TILEGX_OPC_BGEZT, TILEGX_OPC_BGEZ, TILEGX_OPC_BGEZ, TILEGX_OPC_BGTZT,
- TILEGX_OPC_BGTZT, TILEGX_OPC_BGTZ, TILEGX_OPC_BGTZ, TILEGX_OPC_BLBCT,
- TILEGX_OPC_BLBCT, TILEGX_OPC_BLBC, TILEGX_OPC_BLBC, TILEGX_OPC_BLBST,
- TILEGX_OPC_BLBST, TILEGX_OPC_BLBS, TILEGX_OPC_BLBS, TILEGX_OPC_BLEZT,
- TILEGX_OPC_BLEZT, TILEGX_OPC_BLEZ, TILEGX_OPC_BLEZ, TILEGX_OPC_BLTZT,
- TILEGX_OPC_BLTZT, TILEGX_OPC_BLTZ, TILEGX_OPC_BLTZ, TILEGX_OPC_BNEZT,
- TILEGX_OPC_BNEZT, TILEGX_OPC_BNEZ, TILEGX_OPC_BNEZ, CHILD(528), CHILD(578),
- CHILD(598), CHILD(663), CHILD(683), CHILD(688), CHILD(693), CHILD(698),
- CHILD(703), CHILD(708), CHILD(713), CHILD(718), TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
- TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
- CHILD(723), CHILD(740), CHILD(772), CHILD(789), CHILD(1108), CHILD(1125),
- CHILD(1142), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1159), TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
- CHILD(1176),
- BITFIELD(37, 2) /* index 513 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518),
- BITFIELD(39, 2) /* index 518 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523),
- BITFIELD(41, 2) /* index 523 */,
- TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI,
- BITFIELD(51, 2) /* index 528 */,
- TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548),
- BITFIELD(37, 2) /* index 533 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538),
- BITFIELD(39, 2) /* index 538 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543),
- BITFIELD(41, 2) /* index 543 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
- BITFIELD(31, 2) /* index 548 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553),
- BITFIELD(33, 2) /* index 553 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558),
- BITFIELD(35, 2) /* index 558 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563),
- BITFIELD(37, 2) /* index 563 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568),
- BITFIELD(39, 2) /* index 568 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573),
- BITFIELD(41, 2) /* index 573 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
- BITFIELD(51, 2) /* index 578 */,
- TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, CHILD(583),
- BITFIELD(31, 2) /* index 583 */,
- TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, CHILD(588),
- BITFIELD(33, 2) /* index 588 */,
- TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, CHILD(593),
- BITFIELD(35, 2) /* index 593 */,
- TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD,
- TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
- BITFIELD(51, 2) /* index 598 */,
- CHILD(603), CHILD(618), CHILD(633), CHILD(648),
- BITFIELD(31, 2) /* index 603 */,
- TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(608),
- BITFIELD(33, 2) /* index 608 */,
- TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(613),
- BITFIELD(35, 2) /* index 613 */,
- TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD,
- TILEGX_OPC_PREFETCH_ADD_L1,
- BITFIELD(31, 2) /* index 618 */,
- TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(623),
- BITFIELD(33, 2) /* index 623 */,
- TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(628),
- BITFIELD(35, 2) /* index 628 */,
- TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD,
- TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
- BITFIELD(31, 2) /* index 633 */,
- TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(638),
- BITFIELD(33, 2) /* index 638 */,
- TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(643),
- BITFIELD(35, 2) /* index 643 */,
- TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD,
- TILEGX_OPC_PREFETCH_ADD_L2,
- BITFIELD(31, 2) /* index 648 */,
- TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, CHILD(653),
- BITFIELD(33, 2) /* index 653 */,
- TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, CHILD(658),
- BITFIELD(35, 2) /* index 658 */,
- TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD,
- TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
- BITFIELD(51, 2) /* index 663 */,
- CHILD(668), TILEGX_OPC_LDNT1S_ADD, TILEGX_OPC_LDNT1U_ADD,
- TILEGX_OPC_LDNT2S_ADD,
- BITFIELD(31, 2) /* index 668 */,
- TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(673),
- BITFIELD(33, 2) /* index 673 */,
- TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(678),
- BITFIELD(35, 2) /* index 678 */,
- TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD,
- TILEGX_OPC_PREFETCH_ADD_L3,
- BITFIELD(51, 2) /* index 683 */,
- TILEGX_OPC_LDNT2U_ADD, TILEGX_OPC_LDNT4S_ADD, TILEGX_OPC_LDNT4U_ADD,
- TILEGX_OPC_LDNT_ADD,
- BITFIELD(51, 2) /* index 688 */,
- TILEGX_OPC_LD_ADD, TILEGX_OPC_LDNA_ADD, TILEGX_OPC_MFSPR, TILEGX_OPC_MTSPR,
- BITFIELD(51, 2) /* index 693 */,
- TILEGX_OPC_ORI, TILEGX_OPC_ST1_ADD, TILEGX_OPC_ST2_ADD, TILEGX_OPC_ST4_ADD,
- BITFIELD(51, 2) /* index 698 */,
- TILEGX_OPC_STNT1_ADD, TILEGX_OPC_STNT2_ADD, TILEGX_OPC_STNT4_ADD,
- TILEGX_OPC_STNT_ADD,
- BITFIELD(51, 2) /* index 703 */,
- TILEGX_OPC_ST_ADD, TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI,
- TILEGX_OPC_V1CMPLTSI,
- BITFIELD(51, 2) /* index 708 */,
- TILEGX_OPC_V1CMPLTUI, TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI,
- TILEGX_OPC_V2ADDI,
- BITFIELD(51, 2) /* index 713 */,
- TILEGX_OPC_V2CMPEQI, TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI,
- TILEGX_OPC_V2MAXSI,
- BITFIELD(51, 2) /* index 718 */,
- TILEGX_OPC_V2MINSI, TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(49, 4) /* index 723 */,
- TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD,
- TILEGX_OPC_AND, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPEXCH4, TILEGX_OPC_CMPEXCH,
- TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
- TILEGX_OPC_CMPNE, TILEGX_OPC_DBLALIGN2, TILEGX_OPC_DBLALIGN4,
- TILEGX_OPC_DBLALIGN6,
- BITFIELD(49, 4) /* index 740 */,
- TILEGX_OPC_EXCH4, TILEGX_OPC_EXCH, TILEGX_OPC_FETCHADD4,
- TILEGX_OPC_FETCHADDGEZ4, TILEGX_OPC_FETCHADDGEZ, TILEGX_OPC_FETCHADD,
- TILEGX_OPC_FETCHAND4, TILEGX_OPC_FETCHAND, TILEGX_OPC_FETCHOR4,
- TILEGX_OPC_FETCHOR, TILEGX_OPC_MNZ, TILEGX_OPC_MZ, TILEGX_OPC_NOR,
- CHILD(757), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX,
- BITFIELD(43, 2) /* index 757 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(762),
- BITFIELD(45, 2) /* index 762 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(767),
- BITFIELD(47, 2) /* index 767 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
- BITFIELD(49, 4) /* index 772 */,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL,
- TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_ST1,
- TILEGX_OPC_ST2, TILEGX_OPC_ST4, TILEGX_OPC_STNT1, TILEGX_OPC_STNT2,
- TILEGX_OPC_STNT4,
- BITFIELD(46, 7) /* index 789 */,
- TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT,
- TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT,
- TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST,
- TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_SUBXSC,
- TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC,
- TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBX,
- TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX,
- TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
- TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB,
- TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, CHILD(918), CHILD(927),
- CHILD(1006), CHILD(1090), CHILD(1099), TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC,
- TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC,
- TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD,
- TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD,
- TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
- TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
- TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
- TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES,
- TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES,
- TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU,
- TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU,
- TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU,
- TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
- TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
- TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
- TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU,
- TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU,
- TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE,
- TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE,
- TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE,
- TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
- TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
- TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
- TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
- TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
- TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
- BITFIELD(43, 3) /* index 918 */,
- TILEGX_OPC_NONE, TILEGX_OPC_DRAIN, TILEGX_OPC_DTLBPR, TILEGX_OPC_FINV,
- TILEGX_OPC_FLUSHWB, TILEGX_OPC_FLUSH, TILEGX_OPC_FNOP, TILEGX_OPC_ICOH,
- BITFIELD(43, 3) /* index 927 */,
- CHILD(936), TILEGX_OPC_INV, TILEGX_OPC_IRET, TILEGX_OPC_JALRP,
- TILEGX_OPC_JALR, TILEGX_OPC_JRP, TILEGX_OPC_JR, CHILD(991),
- BITFIELD(31, 2) /* index 936 */,
- CHILD(941), CHILD(966), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(33, 2) /* index 941 */,
- TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(946),
- BITFIELD(35, 2) /* index 946 */,
- TILEGX_OPC_ILL, CHILD(951), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(37, 2) /* index 951 */,
- TILEGX_OPC_ILL, CHILD(956), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(39, 2) /* index 956 */,
- TILEGX_OPC_ILL, CHILD(961), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(41, 2) /* index 961 */,
- TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_BPT, TILEGX_OPC_ILL,
- BITFIELD(33, 2) /* index 966 */,
- TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(971),
- BITFIELD(35, 2) /* index 971 */,
- TILEGX_OPC_ILL, CHILD(976), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(37, 2) /* index 976 */,
- TILEGX_OPC_ILL, CHILD(981), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(39, 2) /* index 981 */,
- TILEGX_OPC_ILL, CHILD(986), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
- BITFIELD(41, 2) /* index 986 */,
- TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_RAISE, TILEGX_OPC_ILL,
- BITFIELD(31, 2) /* index 991 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(996),
- BITFIELD(33, 2) /* index 996 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(1001),
- BITFIELD(35, 2) /* index 1001 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S,
- TILEGX_OPC_PREFETCH_L1_FAULT,
- BITFIELD(43, 3) /* index 1006 */,
- CHILD(1015), CHILD(1030), CHILD(1045), CHILD(1060), CHILD(1075),
- TILEGX_OPC_LDNA, TILEGX_OPC_LDNT1S, TILEGX_OPC_LDNT1U,
- BITFIELD(31, 2) /* index 1015 */,
- TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1020),
- BITFIELD(33, 2) /* index 1020 */,
- TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1025),
- BITFIELD(35, 2) /* index 1025 */,
- TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH,
- BITFIELD(31, 2) /* index 1030 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1035),
- BITFIELD(33, 2) /* index 1035 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1040),
- BITFIELD(35, 2) /* index 1040 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S,
- TILEGX_OPC_PREFETCH_L2_FAULT,
- BITFIELD(31, 2) /* index 1045 */,
- TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1050),
- BITFIELD(33, 2) /* index 1050 */,
- TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1055),
- BITFIELD(35, 2) /* index 1055 */,
- TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2,
- BITFIELD(31, 2) /* index 1060 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1065),
- BITFIELD(33, 2) /* index 1065 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1070),
- BITFIELD(35, 2) /* index 1070 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S,
- TILEGX_OPC_PREFETCH_L3_FAULT,
- BITFIELD(31, 2) /* index 1075 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1080),
- BITFIELD(33, 2) /* index 1080 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1085),
- BITFIELD(35, 2) /* index 1085 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3,
- BITFIELD(43, 3) /* index 1090 */,
- TILEGX_OPC_LDNT2S, TILEGX_OPC_LDNT2U, TILEGX_OPC_LDNT4S, TILEGX_OPC_LDNT4U,
- TILEGX_OPC_LDNT, TILEGX_OPC_LD, TILEGX_OPC_LNK, TILEGX_OPC_MF,
- BITFIELD(43, 3) /* index 1099 */,
- TILEGX_OPC_NAP, TILEGX_OPC_NOP, TILEGX_OPC_SWINT0, TILEGX_OPC_SWINT1,
- TILEGX_OPC_SWINT2, TILEGX_OPC_SWINT3, TILEGX_OPC_WH64, TILEGX_OPC_NONE,
- BITFIELD(49, 4) /* index 1108 */,
- TILEGX_OPC_V1MAXU, TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MZ,
- TILEGX_OPC_V1SHL, TILEGX_OPC_V1SHRS, TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC,
- TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC, TILEGX_OPC_V2ADD, TILEGX_OPC_V2CMPEQ,
- TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU, TILEGX_OPC_V2CMPLTS,
- TILEGX_OPC_V2CMPLTU,
- BITFIELD(49, 4) /* index 1125 */,
- TILEGX_OPC_V2CMPNE, TILEGX_OPC_V2INT_H, TILEGX_OPC_V2INT_L,
- TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ, TILEGX_OPC_V2MZ,
- TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC,
- TILEGX_OPC_V2SHLSC, TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU,
- TILEGX_OPC_V2SUBSC, TILEGX_OPC_V2SUB,
- BITFIELD(49, 4) /* index 1142 */,
- TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H,
- TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC,
- TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC,
- TILEGX_OPC_V4SUB, TILEGX_OPC_XOR, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(49, 4) /* index 1159 */,
- TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI,
- TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI,
- TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI,
- TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE,
- BITFIELD(31, 2) /* index 1176 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(1181),
- BITFIELD(33, 2) /* index 1181 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(1186),
- BITFIELD(35, 2) /* index 1186 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(1191),
- BITFIELD(37, 2) /* index 1191 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(1196),
- BITFIELD(39, 2) /* index 1196 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- CHILD(1201),
- BITFIELD(41, 2) /* index 1201 */,
- TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
- TILEGX_OPC_INFOL,
-};
-
-static const unsigned short decode_Y0_fsm[178] =
-{
- BITFIELD(27, 4) /* index 0 */,
- CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI,
- TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(118), CHILD(123),
- CHILD(128), CHILD(133), CHILD(153), CHILD(158), CHILD(163), CHILD(168),
- CHILD(173),
- BITFIELD(6, 2) /* index 17 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22),
- BITFIELD(8, 2) /* index 22 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27),
- BITFIELD(10, 2) /* index 27 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
- BITFIELD(0, 2) /* index 32 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37),
- BITFIELD(2, 2) /* index 37 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42),
- BITFIELD(4, 2) /* index 42 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47),
- BITFIELD(6, 2) /* index 47 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52),
- BITFIELD(8, 2) /* index 52 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57),
- BITFIELD(10, 2) /* index 57 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
- BITFIELD(18, 2) /* index 62 */,
- TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
- BITFIELD(15, 5) /* index 67 */,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD,
- TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD,
- TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(100),
- CHILD(109), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(12, 3) /* index 100 */,
- TILEGX_OPC_NONE, TILEGX_OPC_CLZ, TILEGX_OPC_CTZ, TILEGX_OPC_FNOP,
- TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NOP, TILEGX_OPC_PCNT,
- TILEGX_OPC_REVBITS,
- BITFIELD(12, 3) /* index 109 */,
- TILEGX_OPC_REVBYTES, TILEGX_OPC_TBLIDXB0, TILEGX_OPC_TBLIDXB1,
- TILEGX_OPC_TBLIDXB2, TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- TILEGX_OPC_NONE,
- BITFIELD(18, 2) /* index 118 */,
- TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
- BITFIELD(18, 2) /* index 123 */,
- TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE, TILEGX_OPC_MULAX, TILEGX_OPC_MULX,
- BITFIELD(18, 2) /* index 128 */,
- TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_MNZ, TILEGX_OPC_MZ,
- BITFIELD(18, 2) /* index 133 */,
- TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(138), TILEGX_OPC_XOR,
- BITFIELD(12, 2) /* index 138 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(143),
- BITFIELD(14, 2) /* index 143 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(148),
- BITFIELD(16, 2) /* index 148 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
- BITFIELD(18, 2) /* index 153 */,
- TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU,
- BITFIELD(18, 2) /* index 158 */,
- TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX,
- TILEGX_OPC_SHL3ADDX,
- BITFIELD(18, 2) /* index 163 */,
- TILEGX_OPC_MUL_HS_HS, TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_LS_LS,
- TILEGX_OPC_MUL_LU_LU,
- BITFIELD(18, 2) /* index 168 */,
- TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_LS_LS,
- TILEGX_OPC_MULA_LU_LU,
- BITFIELD(18, 2) /* index 173 */,
- TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI,
-};
-
-static const unsigned short decode_Y1_fsm[167] =
-{
- BITFIELD(58, 4) /* index 0 */,
- TILEGX_OPC_NONE, CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI,
- TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(117), CHILD(122),
- CHILD(127), CHILD(132), CHILD(152), CHILD(157), CHILD(162), TILEGX_OPC_NONE,
- BITFIELD(37, 2) /* index 17 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22),
- BITFIELD(39, 2) /* index 22 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27),
- BITFIELD(41, 2) /* index 27 */,
- TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
- BITFIELD(31, 2) /* index 32 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37),
- BITFIELD(33, 2) /* index 37 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42),
- BITFIELD(35, 2) /* index 42 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47),
- BITFIELD(37, 2) /* index 47 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52),
- BITFIELD(39, 2) /* index 52 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57),
- BITFIELD(41, 2) /* index 57 */,
- TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
- BITFIELD(49, 2) /* index 62 */,
- TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
- BITFIELD(47, 4) /* index 67 */,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
- TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
- TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD,
- TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(84),
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
- BITFIELD(43, 3) /* index 84 */,
- CHILD(93), CHILD(96), CHILD(99), CHILD(102), CHILD(105), CHILD(108),
- CHILD(111), CHILD(114),
- BITFIELD(46, 1) /* index 93 */,
- TILEGX_OPC_NONE, TILEGX_OPC_FNOP,
- BITFIELD(46, 1) /* index 96 */,
- TILEGX_OPC_NONE, TILEGX_OPC_ILL,
- BITFIELD(46, 1) /* index 99 */,
- TILEGX_OPC_NONE, TILEGX_OPC_JALRP,
- BITFIELD(46, 1) /* index 102 */,
- TILEGX_OPC_NONE, TILEGX_OPC_JALR,
- BITFIELD(46, 1) /* index 105 */,
- TILEGX_OPC_NONE, TILEGX_OPC_JRP,
- BITFIELD(46, 1) /* index 108 */,
- TILEGX_OPC_NONE, TILEGX_OPC_JR,
- BITFIELD(46, 1) /* index 111 */,
- TILEGX_OPC_NONE, TILEGX_OPC_LNK,
- BITFIELD(46, 1) /* index 114 */,
- TILEGX_OPC_NONE, TILEGX_OPC_NOP,
- BITFIELD(49, 2) /* index 117 */,
- TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
- BITFIELD(49, 2) /* index 122 */,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE,
- BITFIELD(49, 2) /* index 127 */,
- TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_MNZ, TILEGX_OPC_MZ,
- BITFIELD(49, 2) /* index 132 */,
- TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(137), TILEGX_OPC_XOR,
- BITFIELD(43, 2) /* index 137 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(142),
- BITFIELD(45, 2) /* index 142 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(147),
- BITFIELD(47, 2) /* index 147 */,
- TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
- BITFIELD(49, 2) /* index 152 */,
- TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU,
- BITFIELD(49, 2) /* index 157 */,
- TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX,
- TILEGX_OPC_SHL3ADDX,
- BITFIELD(49, 2) /* index 162 */,
- TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI,
-};
-
-static const unsigned short decode_Y2_fsm[118] =
-{
- BITFIELD(62, 2) /* index 0 */,
- TILEGX_OPC_NONE, CHILD(5), CHILD(66), CHILD(109),
- BITFIELD(55, 3) /* index 5 */,
- CHILD(14), CHILD(14), CHILD(14), CHILD(17), CHILD(40), CHILD(40), CHILD(40),
- CHILD(43),
- BITFIELD(26, 1) /* index 14 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1U,
- BITFIELD(26, 1) /* index 17 */,
- CHILD(20), CHILD(30),
- BITFIELD(51, 2) /* index 20 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(25),
- BITFIELD(53, 2) /* index 25 */,
- TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S,
- TILEGX_OPC_PREFETCH_L1_FAULT,
- BITFIELD(51, 2) /* index 30 */,
- TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(35),
- BITFIELD(53, 2) /* index 35 */,
- TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH,
- BITFIELD(26, 1) /* index 40 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2U,
- BITFIELD(26, 1) /* index 43 */,
- CHILD(46), CHILD(56),
- BITFIELD(51, 2) /* index 46 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(51),
- BITFIELD(53, 2) /* index 51 */,
- TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S,
- TILEGX_OPC_PREFETCH_L2_FAULT,
- BITFIELD(51, 2) /* index 56 */,
- TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(61),
- BITFIELD(53, 2) /* index 61 */,
- TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2,
- BITFIELD(56, 2) /* index 66 */,
- CHILD(71), CHILD(74), CHILD(90), CHILD(93),
- BITFIELD(26, 1) /* index 71 */,
- TILEGX_OPC_NONE, TILEGX_OPC_LD4S,
- BITFIELD(26, 1) /* index 74 */,
- TILEGX_OPC_NONE, CHILD(77),
- BITFIELD(51, 2) /* index 77 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(82),
- BITFIELD(53, 2) /* index 82 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(87),
- BITFIELD(55, 1) /* index 87 */,
- TILEGX_OPC_LD4S, TILEGX_OPC_PREFETCH_L3_FAULT,
- BITFIELD(26, 1) /* index 90 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD,
- BITFIELD(26, 1) /* index 93 */,
- CHILD(96), TILEGX_OPC_LD,
- BITFIELD(51, 2) /* index 96 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(101),
- BITFIELD(53, 2) /* index 101 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(106),
- BITFIELD(55, 1) /* index 106 */,
- TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3,
- BITFIELD(26, 1) /* index 109 */,
- CHILD(112), CHILD(115),
- BITFIELD(57, 1) /* index 112 */,
- TILEGX_OPC_ST1, TILEGX_OPC_ST4,
- BITFIELD(57, 1) /* index 115 */,
- TILEGX_OPC_ST2, TILEGX_OPC_ST,
-};
-
-#undef BITFIELD
-#undef CHILD
-const unsigned short * const
-tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS] =
-{
- decode_X0_fsm,
- decode_X1_fsm,
- decode_Y0_fsm,
- decode_Y1_fsm,
- decode_Y2_fsm
-};
-const struct tilegx_operand tilegx_operands[35] =
-{
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X0),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_X0, get_Imm8_X0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X1),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_X1, get_Imm8_X1
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y0),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_Y0, get_Imm8_Y0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y1),
- 8, 1, 0, 0, 0, 0,
- create_Imm8_Y1, get_Imm8_Y1
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X0_HW0_LAST),
- 16, 1, 0, 0, 0, 0,
- create_Imm16_X0, get_Imm16_X0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X1_HW0_LAST),
- 16, 1, 0, 0, 0, 0,
- create_Imm16_X1, get_Imm16_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_X0, get_Dest_X0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_X0, get_SrcA_X0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_X1, get_Dest_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_X1, get_SrcA_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_Y0, get_Dest_Y0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y0, get_SrcA_Y0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_Dest_Y1, get_Dest_Y1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y1, get_SrcA_Y1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcA_Y2, get_SrcA_Y2
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_SrcA_X1, get_SrcA_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_X0, get_SrcB_X0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_X1, get_SrcB_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_Y0, get_SrcB_Y0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcB_Y1, get_SrcB_Y1
- },
- {
- TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_BROFF_X1),
- 17, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
- create_BrOff_X1, get_BrOff_X1
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_MMSTART_X0),
- 6, 0, 0, 0, 0, 0,
- create_BFStart_X0, get_BFStart_X0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_MMEND_X0),
- 6, 0, 0, 0, 0, 0,
- create_BFEnd_X0, get_BFEnd_X0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_Dest_X0, get_Dest_X0
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 1, 0, 0,
- create_Dest_Y0, get_Dest_Y0
- },
- {
- TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_JUMPOFF_X1),
- 27, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
- create_JumpOff_X1, get_JumpOff_X1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 0, 1, 0, 0,
- create_SrcBDest_Y2, get_SrcBDest_Y2
- },
- {
- TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MF_IMM14_X1),
- 14, 0, 0, 0, 0, 0,
- create_MF_Imm14_X1, get_MF_Imm14_X1
- },
- {
- TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MT_IMM14_X1),
- 14, 0, 0, 0, 0, 0,
- create_MT_Imm14_X1, get_MT_Imm14_X1
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X0),
- 6, 0, 0, 0, 0, 0,
- create_ShAmt_X0, get_ShAmt_X0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X1),
- 6, 0, 0, 0, 0, 0,
- create_ShAmt_X1, get_ShAmt_X1
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y0),
- 6, 0, 0, 0, 0, 0,
- create_ShAmt_Y0, get_ShAmt_Y0
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y1),
- 6, 0, 0, 0, 0, 0,
- create_ShAmt_Y1, get_ShAmt_Y1
- },
- {
- TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
- 6, 0, 1, 0, 0, 0,
- create_SrcBDest_Y2, get_SrcBDest_Y2
- },
- {
- TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_DEST_IMM8_X1),
- 8, 1, 0, 0, 0, 0,
- create_Dest_Imm8_X1, get_Dest_Imm8_X1
- }
-};
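
After the type and relocation, the six scalar fields in each initializer above are, in order: operand bit width, signedness, source-register flag, destination-register flag, PC-relative flag, and right-shift scaling. A hedged sketch of the corresponding struct layout, inferred from these initializers and from how parse_insn_tilegx() below consumes the table (the authoritative definition lives in the opcode header, outside this excerpt):

	/* Assumed layout of struct tilegx_operand; field names are
	 * illustrative and inferred, not copied from the header. */
	struct tilegx_operand {
		tilegx_operand_type type;        /* register/immediate/address/SPR */
		unsigned short default_reloc;    /* the BFD_RELOC(...) value */
		unsigned int num_bits : 5;       /* operand width in bits */
		unsigned int is_signed : 1;      /* sign-extend on decode? */
		unsigned int is_src_reg : 1;     /* operand is read */
		unsigned int is_dest_reg : 1;    /* operand is written */
		unsigned int is_pc_relative : 1; /* scaled branch/jump offset */
		unsigned int rightshift : 2;     /* log2 scaling of the value */
		tilegx_bundle_bits (*insert)(int op);        /* the create_*() */
		unsigned int (*extract)(tilegx_bundle_bits); /* the get_*() */
	};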
-
-/* Given a set of bundle bits and the lookup FSM for a specific pipe,
- * returns which instruction the bundle contains in that pipe.
- */
-static const struct tilegx_opcode *
-find_opcode(tilegx_bundle_bits bits, const unsigned short *table)
-{
- int index = 0;
-
- while (1)
- {
- unsigned short bitspec = table[index];
- unsigned int bitfield =
- ((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6);
-
- unsigned short next = table[index + 1 + bitfield];
- if (next <= TILEGX_OPC_NONE)
- return &tilegx_opcodes[next];
-
- index = next - TILEGX_OPC_NONE;
- }
-}
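
find_opcode() treats each table entry as a packed bit spec: the low 6 bits hold the starting bit position and the upper bits hold the extraction mask, while child links are biased by TILEGX_OPC_NONE so they cannot collide with real opcode numbers. A minimal sketch of BITFIELD/CHILD definitions consistent with that unpacking (the real definitions precede the tables above and were #undef'd once the tables ended):

	#define BITFIELD(start, size)	((start) | (((1 << (size)) - 1) << 6))
	#define CHILD(array_index)	(TILEGX_OPC_NONE + (array_index))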
-
-
-int
-parse_insn_tilegx(tilegx_bundle_bits bits,
- unsigned long long pc,
- struct tilegx_decoded_instruction
- decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE])
-{
- int num_instructions = 0;
- int pipe;
-
- int min_pipe, max_pipe;
- if ((bits & TILEGX_BUNDLE_MODE_MASK) == 0)
- {
- min_pipe = TILEGX_PIPELINE_X0;
- max_pipe = TILEGX_PIPELINE_X1;
- }
- else
- {
- min_pipe = TILEGX_PIPELINE_Y0;
- max_pipe = TILEGX_PIPELINE_Y2;
- }
-
- /* For each pipe, find an instruction that fits. */
- for (pipe = min_pipe; pipe <= max_pipe; pipe++)
- {
- const struct tilegx_opcode *opc;
- struct tilegx_decoded_instruction *d;
- int i;
-
- d = &decoded[num_instructions++];
- opc = find_opcode (bits, tilegx_bundle_decoder_fsms[pipe]);
- d->opcode = opc;
-
- /* Decode each operand, sign extending, etc. as appropriate. */
- for (i = 0; i < opc->num_operands; i++)
- {
- const struct tilegx_operand *op =
- &tilegx_operands[opc->operands[pipe][i]];
- int raw_opval = op->extract (bits);
- long long opval;
-
- if (op->is_signed)
- {
- /* Sign-extend the operand. */
- int shift = (int)((sizeof(int) * 8) - op->num_bits);
- raw_opval = (raw_opval << shift) >> shift;
- }
-
- /* Adjust PC-relative scaled branch offsets. */
- if (op->type == TILEGX_OP_TYPE_ADDRESS)
- opval = (raw_opval * TILEGX_BUNDLE_SIZE_IN_BYTES) + pc;
- else
- opval = raw_opval;
-
- /* Record the final value. */
- d->operands[i] = op;
- d->operand_values[i] = opval;
- }
- }
-
- return num_instructions;
-}
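
The sign-extension idiom above deserves a note: shifting the raw field left until its top bit reaches the int's sign bit, then arithmetic-shifting back, replicates the sign through the high bits. A standalone sketch (hypothetical helper; like the code above, it relies on the compiler implementing right shift of a negative int arithmetically, which the kernel assumes everywhere):

	static int sign_extend_field(int raw, int num_bits)
	{
		int shift = (int)(sizeof(int) * 8 - num_bits);
		/* e.g. an 8-bit field of 0xff yields -1, not 255 */
		return (raw << shift) >> shift;
	}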
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
deleted file mode 100644
index f95d65f3162b..000000000000
--- a/arch/tile/kernel/time.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Support the cycle counter clocksource and tile timer clock event device.
- */
-
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/hardirq.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/smp.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/timekeeper_internal.h>
-#include <asm/irq_regs.h>
-#include <asm/traps.h>
-#include <asm/vdso.h>
-#include <hv/hypervisor.h>
-#include <arch/interrupts.h>
-#include <arch/spr_def.h>
-
-
-/*
- * Define the cycle counter clock source.
- */
-
-/* How many cycles per second we are running at. */
-static cycles_t cycles_per_sec __ro_after_init;
-
-cycles_t get_clock_rate(void)
-{
- return cycles_per_sec;
-}
-
-#if CHIP_HAS_SPLIT_CYCLE()
-cycles_t get_cycles(void)
-{
- unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
- unsigned int low = __insn_mfspr(SPR_CYCLE_LOW);
- unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH);
-
-	/* Re-read if the high word changed while we read the low word. */
-	while (unlikely(high != high2)) {
- low = __insn_mfspr(SPR_CYCLE_LOW);
- high = high2;
- high2 = __insn_mfspr(SPR_CYCLE_HIGH);
- }
-
- return (((cycles_t)high) << 32) | low;
-}
-EXPORT_SYMBOL(get_cycles);
-#endif
-
-/*
- * We use a relatively small shift value so that sched_clock()
- * won't wrap around very often.
- */
-#define SCHED_CLOCK_SHIFT 10
-
-static unsigned long sched_clock_mult __ro_after_init;
-
-static cycles_t clocksource_get_cycles(struct clocksource *cs)
-{
- return get_cycles();
-}
-
-static struct clocksource cycle_counter_cs = {
- .name = "cycle counter",
- .rating = 300,
- .read = clocksource_get_cycles,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-/*
- * Called very early from setup_arch() to set cycles_per_sec.
- * We initialize it early so we can use it to set up loops_per_jiffy.
- */
-void __init setup_clock(void)
-{
- cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED);
- sched_clock_mult =
- clocksource_hz2mult(cycles_per_sec, SCHED_CLOCK_SHIFT);
-}
-
-void __init calibrate_delay(void)
-{
- loops_per_jiffy = get_clock_rate() / HZ;
- pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n",
- loops_per_jiffy / (500000 / HZ),
- (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
-}
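
As a worked example of the BogoMIPS arithmetic above: with HZ=100 and a 1 GHz clock, loops_per_jiffy is 10,000,000; the integer part is 10000000 / (500000/100) = 2000 and the fractional part is (10000000 / (5000/100)) % 100 = 0, so the boot log reads "2000.00 BogoMIPS (lpj=10000000)".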
-
-/* Called fairly late in init/main.c, but before we go smp. */
-void __init time_init(void)
-{
- /* Initialize and register the clock source. */
- clocksource_register_hz(&cycle_counter_cs, cycles_per_sec);
-
- /* Start up the tile-timer interrupt source on the boot cpu. */
- setup_tile_timer();
-}
-
-/*
- * Define the tile timer clock event device. The timer is driven by
- * the TILE_TIMER_CONTROL register, which consists of a 31-bit down
- * counter, plus bit 31, which signifies that the counter has wrapped
- * from zero to (2**31) - 1. The INT_TILE_TIMER interrupt will be
- * raised as long as bit 31 is set.
- *
- * The TILE_MINSEC value represents the largest range of real-time
- * we can possibly cover with the timer, based on MAX_TICK combined
- * with the slowest reasonable clock rate we might run at.
- */
-
-#define MAX_TICK 0x7fffffff /* we have 31 bits of countdown timer */
-#define TILE_MINSEC 5 /* timer covers no more than 5 seconds */
-
-static int tile_timer_set_next_event(unsigned long ticks,
- struct clock_event_device *evt)
-{
- BUG_ON(ticks > MAX_TICK);
- __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
- arch_local_irq_unmask_now(INT_TILE_TIMER);
- return 0;
-}
-
-/*
- * Whenever anyone tries to change modes, we just mask interrupts
- * and wait for the next event to get set.
- */
-static int tile_timer_shutdown(struct clock_event_device *evt)
-{
- arch_local_irq_mask_now(INT_TILE_TIMER);
- return 0;
-}
-
-/*
- * Set min_delta_ns to 1 microsecond, since it takes about
- * that long to fire the interrupt.
- */
-static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
- .name = "tile timer",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .min_delta_ns = 1000,
- .min_delta_ticks = 1,
- .max_delta_ticks = MAX_TICK,
- .rating = 100,
- .irq = -1,
- .set_next_event = tile_timer_set_next_event,
- .set_state_shutdown = tile_timer_shutdown,
- .set_state_oneshot = tile_timer_shutdown,
- .set_state_oneshot_stopped = tile_timer_shutdown,
- .tick_resume = tile_timer_shutdown,
-};
-
-void setup_tile_timer(void)
-{
- struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
-
- /* Fill in fields that are speed-specific. */
- clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
- evt->max_delta_ns = clockevent_delta2ns(MAX_TICK, evt);
-
- /* Mark as being for this cpu only. */
- evt->cpumask = cpumask_of(smp_processor_id());
-
- /* Start out with timer not firing. */
- arch_local_irq_mask_now(INT_TILE_TIMER);
-
- /* Register tile timer. */
- clockevents_register_device(evt);
-}
-
-/* Called from the interrupt vector. */
-void do_timer_interrupt(struct pt_regs *regs, int fault_num)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
- struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
-
- /*
- * Mask the timer interrupt here, since we are a oneshot timer
- * and there are now by definition no events pending.
- */
- arch_local_irq_mask(INT_TILE_TIMER);
-
- /* Track time spent here in an interrupt context */
- irq_enter();
-
- /* Track interrupt count. */
- __this_cpu_inc(irq_stat.irq_timer_count);
-
- /* Call the generic timer handler */
- evt->event_handler(evt);
-
- /*
- * Track time spent against the current process again and
- * process any softirqs if they are waiting.
- */
- irq_exit();
-
- set_irq_regs(old_regs);
-}
-
-/*
- * Scheduler clock - returns current time in nanosec units.
- * Note that with LOCKDEP, this is called during lockdep_init(), and
- * we will claim that sched_clock() is zero for a little while, until
- * we run setup_clock(), above.
- */
-unsigned long long sched_clock(void)
-{
- return mult_frac(get_cycles(),
- sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
-}
-
-int setup_profiling_timer(unsigned int multiplier)
-{
- return -EINVAL;
-}
-
-/*
- * Use the tile timer to convert nsecs to core clock cycles, relying
- * on it having the same frequency as SPR_CYCLE.
- */
-cycles_t ns2cycles(unsigned long nsecs)
-{
- /*
- * We do not have to disable preemption here as each core has the same
- * clock frequency.
- */
- struct clock_event_device *dev = raw_cpu_ptr(&tile_timer);
-
- /*
-	 * As in clocksource.h and x86's timer.h, we split the calculation
-	 * into two parts to avoid unnecessary overflow of the intermediate
-	 * value. This does not lose any precision.
- */
- u64 quot = (u64)nsecs >> dev->shift;
- u64 rem = (u64)nsecs & ((1ULL << dev->shift) - 1);
- return quot * dev->mult + ((rem * dev->mult) >> dev->shift);
-}
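
The split above is exact rather than approximate: writing nsecs as (quot << shift) + rem gives (nsecs * mult) >> shift == quot * mult + ((rem * mult) >> shift), because the quot term is a whole multiple of 2^shift before the final shift. A self-contained restatement (hypothetical variable names; not part of the original file):

	/* Equivalent to (nsecs * mult) >> shift, without forming the
	 * full nsecs * mult product that could overflow 64 bits. */
	u64 quot = nsecs >> shift;
	u64 rem  = nsecs & ((1ULL << shift) - 1);
	u64 result = quot * mult + ((rem * mult) >> shift);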
-
-void update_vsyscall_tz(void)
-{
- write_seqcount_begin(&vdso_data->tz_seq);
- vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
- vdso_data->tz_dsttime = sys_tz.tz_dsttime;
- write_seqcount_end(&vdso_data->tz_seq);
-}
-
-void update_vsyscall(struct timekeeper *tk)
-{
- if (tk->tkr_mono.clock != &cycle_counter_cs)
- return;
-
- write_seqcount_begin(&vdso_data->tb_seq);
-
- vdso_data->cycle_last = tk->tkr_mono.cycle_last;
- vdso_data->mask = tk->tkr_mono.mask;
- vdso_data->mult = tk->tkr_mono.mult;
- vdso_data->shift = tk->tkr_mono.shift;
-
- vdso_data->wall_time_sec = tk->xtime_sec;
- vdso_data->wall_time_snsec = tk->tkr_mono.xtime_nsec;
-
- vdso_data->monotonic_time_sec = tk->xtime_sec
- + tk->wall_to_monotonic.tv_sec;
- vdso_data->monotonic_time_snsec = tk->tkr_mono.xtime_nsec
- + ((u64)tk->wall_to_monotonic.tv_nsec
- << tk->tkr_mono.shift);
- while (vdso_data->monotonic_time_snsec >=
- (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
- vdso_data->monotonic_time_snsec -=
- ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
- vdso_data->monotonic_time_sec++;
- }
-
- vdso_data->wall_time_coarse_sec = tk->xtime_sec;
- vdso_data->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >>
- tk->tkr_mono.shift);
-
- vdso_data->monotonic_time_coarse_sec =
- vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
- vdso_data->monotonic_time_coarse_nsec =
- vdso_data->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
-
- while (vdso_data->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
- vdso_data->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
- vdso_data->monotonic_time_coarse_sec++;
- }
-
- write_seqcount_end(&vdso_data->tb_seq);
-}
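
The fields published here are consumed lock-free by the vDSO's userspace clock readers (vdso/vgettimeofday.c in this same deleted directory). A condensed, hedged sketch of the matching reader loop under tb_seq, assuming the standard clocksource arithmetic:

	unsigned seq;
	u64 count, ns, sec;

	do {
		seq   = read_seqcount_begin(&vdso_data->tb_seq);
		count = (get_cycles() - vdso_data->cycle_last) & vdso_data->mask;
		ns    = vdso_data->wall_time_snsec + count * vdso_data->mult;
		sec   = vdso_data->wall_time_sec;
	} while (read_seqcount_retry(&vdso_data->tb_seq, seq));
	ns >>= vdso_data->shift;	/* "snsec" are shifted nanoseconds */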
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
deleted file mode 100644
index f23b53515671..000000000000
--- a/arch/tile/kernel/tlb.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/cpumask.h>
-#include <linux/module.h>
-#include <linux/hugetlb.h>
-#include <asm/tlbflush.h>
-#include <asm/homecache.h>
-#include <hv/hypervisor.h>
-
-/* From tlbflush.h */
-DEFINE_PER_CPU(int, current_asid);
-int min_asid, max_asid;
-
-/*
- * Note that we flush the L1I (for VM_EXEC pages) as well as the TLB
- * so that when we are unmapping an executable page, we also flush it.
- * Combined with flushing the L1I at context switch time, this means
- * we don't have to do any other icache flushes.
- */
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
- HV_Remote_ASID asids[NR_CPUS];
- int i = 0, cpu;
- for_each_cpu(cpu, mm_cpumask(mm)) {
- HV_Remote_ASID *asid = &asids[i++];
- asid->y = cpu / smp_topology.width;
- asid->x = cpu % smp_topology.width;
- asid->asid = per_cpu(current_asid, cpu);
- }
- flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
- 0, 0, 0, NULL, asids, i);
-}
-
-void flush_tlb_current_task(void)
-{
- flush_tlb_mm(current->mm);
-}
-
-void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
- unsigned long va)
-{
- unsigned long size = vma_kernel_pagesize(vma);
- int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
- flush_remote(0, cache, mm_cpumask(mm),
- va, size, size, mm_cpumask(mm), NULL, 0);
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-{
- flush_tlb_page_mm(vma, vma->vm_mm, va);
-}
-EXPORT_SYMBOL(flush_tlb_page);
-
-void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- unsigned long size = vma_kernel_pagesize(vma);
- struct mm_struct *mm = vma->vm_mm;
- int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
- flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
- mm_cpumask(mm), NULL, 0);
-}
-
-void flush_tlb_all(void)
-{
- int i;
- for (i = 0; ; ++i) {
- HV_VirtAddrRange r = hv_inquire_virtual(i);
- if (r.size == 0)
- break;
- flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
- r.start, r.size, PAGE_SIZE, cpu_online_mask,
- NULL, 0);
- flush_remote(0, 0, NULL,
- r.start, r.size, HPAGE_SIZE, cpu_online_mask,
- NULL, 0);
- }
-}
-
-/*
- * Callers need to flush the L1I themselves if necessary, e.g. for
- * kernel module unload. Otherwise we assume callers are not using
- * executable pgprot_t's. If we used EVICT_L1I here, dataplane cpus
- * would take an unnecessary interrupt.
- */
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- flush_remote(0, 0, NULL,
- start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
-}
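
Every flush in this file funnels through flush_remote(), so its parameter order is worth keeping at hand. A reading aid inferred from the call sites above (the actual declaration lives in asm/homecache.h, outside this excerpt):

	/* Inferred from the call sites, not authoritative:
	 * void flush_remote(unsigned long cache_pfn,
	 *                   unsigned long cache_control,
	 *                   const struct cpumask *cache_cpumask,
	 *                   HV_VirtAddr tlb_va, unsigned long tlb_length,
	 *                   unsigned long tlb_pgsize,
	 *                   const struct cpumask *tlb_cpumask,
	 *                   HV_Remote_ASID *asids, int asidcount);
	 */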
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
deleted file mode 100644
index 83a7186198d7..000000000000
--- a/arch/tile/kernel/traps.c
+++ /dev/null
@@ -1,421 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/kdebug.h>
-#include <linux/module.h>
-#include <linux/reboot.h>
-#include <linux/uaccess.h>
-#include <linux/ptrace.h>
-#include <linux/hardirq.h>
-#include <linux/nmi.h>
-#include <asm/stack.h>
-#include <asm/traps.h>
-#include <asm/setup.h>
-
-#include <arch/interrupts.h>
-#include <arch/spr_def.h>
-#include <arch/opcode.h>
-
-void __init trap_init(void)
-{
- /* Nothing needed here since we link code at .intrpt */
-}
-
-int unaligned_fixup = 1;
-
-static int __init setup_unaligned_fixup(char *str)
-{
- /*
- * Say "=-1" to completely disable it. If you just do "=0", we
- * will still parse the instruction, then fire a SIGBUS with
- * the correct address from inside the single_step code.
- */
- if (kstrtoint(str, 0, &unaligned_fixup) != 0)
- return 0;
-
- pr_info("Fixups for unaligned data accesses are %s\n",
- unaligned_fixup >= 0 ?
- (unaligned_fixup ? "enabled" : "disabled") :
- "completely disabled");
- return 1;
-}
-__setup("unaligned_fixup=", setup_unaligned_fixup);
-
-#if CHIP_HAS_TILE_DMA()
-
-static int dma_disabled;
-
-static int __init nodma(char *str)
-{
- pr_info("User-space DMA is disabled\n");
- dma_disabled = 1;
- return 1;
-}
-__setup("nodma", nodma);
-
-/* How to decode SPR_GPV_REASON */
-#define IRET_ERROR (1U << 31)
-#define MT_ERROR (1U << 30)
-#define MF_ERROR (1U << 29)
-#define SPR_INDEX ((1U << 15) - 1)
-#define SPR_MPL_SHIFT 9 /* starting bit position for MPL encoded in SPR */
-
-/*
- * See if this GPV is just to notify the kernel of SPR use and we can
- * retry the user instruction after adjusting some MPLs suitably.
- */
-static int retry_gpv(unsigned int gpv_reason)
-{
- int mpl;
-
- if (gpv_reason & IRET_ERROR)
- return 0;
-
- BUG_ON((gpv_reason & (MT_ERROR|MF_ERROR)) == 0);
- mpl = (gpv_reason & SPR_INDEX) >> SPR_MPL_SHIFT;
- if (mpl == INT_DMA_NOTIFY && !dma_disabled) {
- /* User is turning on DMA. Allow it and retry. */
- printk(KERN_DEBUG "Process %d/%s is now enabled for DMA\n",
- current->pid, current->comm);
- BUG_ON(current->thread.tile_dma_state.enabled);
- current->thread.tile_dma_state.enabled = 1;
- grant_dma_mpls();
- return 1;
- }
-
- return 0;
-}
-
-#endif /* CHIP_HAS_TILE_DMA() */
-
-extern tile_bundle_bits bpt_code;
-
-asm(".pushsection .rodata.bpt_code,\"a\";"
- ".align 8;"
- "bpt_code: bpt;"
- ".size bpt_code,.-bpt_code;"
- ".popsection");
-
-static int special_ill(tile_bundle_bits bundle, int *sigp, int *codep)
-{
- int sig, code, maxcode;
-
- if (bundle == bpt_code) {
- *sigp = SIGTRAP;
- *codep = TRAP_BRKPT;
- return 1;
- }
-
- /* If it's a "raise" bundle, then "ill" must be in pipe X1. */
-#ifdef __tilegx__
- if ((bundle & TILEGX_BUNDLE_MODE_MASK) != 0)
- return 0;
- if (get_Opcode_X1(bundle) != RRR_0_OPCODE_X1)
- return 0;
- if (get_RRROpcodeExtension_X1(bundle) != UNARY_RRR_0_OPCODE_X1)
- return 0;
- if (get_UnaryOpcodeExtension_X1(bundle) != ILL_UNARY_OPCODE_X1)
- return 0;
-#else
- if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)
- return 0;
- if (get_Opcode_X1(bundle) != SHUN_0_OPCODE_X1)
- return 0;
- if (get_UnShOpcodeExtension_X1(bundle) != UN_0_SHUN_0_OPCODE_X1)
- return 0;
- if (get_UnOpcodeExtension_X1(bundle) != ILL_UN_0_SHUN_0_OPCODE_X1)
- return 0;
-#endif
-
- /* Check that the magic distinguishers are set to mean "raise". */
- if (get_Dest_X1(bundle) != 29 || get_SrcA_X1(bundle) != 37)
- return 0;
-
- /* There must be an "addli zero, zero, VAL" in X0. */
- if (get_Opcode_X0(bundle) != ADDLI_OPCODE_X0)
- return 0;
- if (get_Dest_X0(bundle) != TREG_ZERO)
- return 0;
- if (get_SrcA_X0(bundle) != TREG_ZERO)
- return 0;
-
- /*
- * Validate the proposed signal number and si_code value.
- * Note that we embed these in the static instruction itself
- * so that we perturb the register state as little as possible
- * at the time of the actual fault; it's unlikely you'd ever
- * need to dynamically choose which kind of fault to raise
- * from user space.
- */
- sig = get_Imm16_X0(bundle) & 0x3f;
- switch (sig) {
- case SIGILL:
- maxcode = NSIGILL;
- break;
- case SIGFPE:
- maxcode = NSIGFPE;
- break;
- case SIGSEGV:
- maxcode = NSIGSEGV;
- break;
- case SIGBUS:
- maxcode = NSIGBUS;
- break;
- case SIGTRAP:
- maxcode = NSIGTRAP;
- break;
- default:
- return 0;
- }
- code = (get_Imm16_X0(bundle) >> 6) & 0xf;
- if (code <= 0 || code > maxcode)
- return 0;
-
- /* Make it the requested signal. */
- *sigp = sig;
- *codep = code;
- return 1;
-}
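
Read in reverse, special_ill() defines the encoding contract for a userspace "raise": the 16-bit X0 immediate packs the signal number in bits 0-5 and the si_code in bits 6-9, alongside the magic "ill" operands (dest 29, srcA 37) in pipe X1. A hedged sketch of how that immediate would be packed (hypothetical macro mirroring the decode above):

	/* Pack sig/code the way special_ill() unpacks them. */
	#define RAISE_IMM16(sig, code)	(((sig) & 0x3f) | (((code) & 0xf) << 6))
	/* e.g. RAISE_IMM16(SIGFPE, FPE_INTDIV) as the immediate of the
	 * "addli zero, zero, VAL" instruction in pipe X0. */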
-
-static const char *const int_name[] = {
- [INT_MEM_ERROR] = "Memory error",
- [INT_ILL] = "Illegal instruction",
- [INT_GPV] = "General protection violation",
- [INT_UDN_ACCESS] = "UDN access",
- [INT_IDN_ACCESS] = "IDN access",
-#if CHIP_HAS_SN()
- [INT_SN_ACCESS] = "SN access",
-#endif
- [INT_SWINT_3] = "Software interrupt 3",
- [INT_SWINT_2] = "Software interrupt 2",
- [INT_SWINT_0] = "Software interrupt 0",
- [INT_UNALIGN_DATA] = "Unaligned data",
- [INT_DOUBLE_FAULT] = "Double fault",
-#ifdef __tilegx__
- [INT_ILL_TRANS] = "Illegal virtual address",
-#endif
-};
-
-static int do_bpt(struct pt_regs *regs)
-{
- unsigned long bundle, bcode, bpt;
-
- bundle = *(unsigned long *)instruction_pointer(regs);
-
- /*
-	 * bpt should be { bpt; nop }, which is 0x286a44ae51485000ULL.
-	 * We encode the unused least significant bits for other purposes.
- */
- bpt = bundle & ~((1ULL << 12) - 1);
- if (bpt != TILE_BPT_BUNDLE)
- return 0;
-
- bcode = bundle & ((1ULL << 12) - 1);
- /*
-	 * Notify the kprobe handlers if the instruction is likely to
-	 * pertain to them.
- */
- switch (bcode) {
- /* breakpoint_insn */
- case 0:
- notify_die(DIE_BREAK, "debug", regs, bundle,
- INT_ILL, SIGTRAP);
- break;
- /* compiled_bpt */
- case DIE_COMPILED_BPT:
- notify_die(DIE_COMPILED_BPT, "debug", regs, bundle,
- INT_ILL, SIGTRAP);
- break;
- /* breakpoint2_insn */
- case DIE_SSTEPBP:
- notify_die(DIE_SSTEPBP, "single_step", regs, bundle,
- INT_ILL, SIGTRAP);
- break;
- default:
- return 0;
- }
-
- return 1;
-}
-
-void __kprobes do_trap(struct pt_regs *regs, int fault_num,
- unsigned long reason)
-{
- siginfo_t info;
- int signo, code;
- unsigned long address = 0;
- tile_bundle_bits instr;
- int is_kernel = !user_mode(regs);
-
- clear_siginfo(&info);
-
- /* Handle breakpoints, etc. */
- if (is_kernel && fault_num == INT_ILL && do_bpt(regs))
- return;
-
- /* Re-enable interrupts, if they were previously enabled. */
- if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
- local_irq_enable();
-
- /*
- * If it hits in kernel mode and we can't fix it up, just exit the
- * current process and hope for the best.
- */
- if (is_kernel) {
- const char *name;
- char buf[100];
- if (fixup_exception(regs)) /* ILL_TRANS or UNALIGN_DATA */
- return;
- if (fault_num >= 0 &&
- fault_num < ARRAY_SIZE(int_name) &&
- int_name[fault_num] != NULL)
- name = int_name[fault_num];
- else
- name = "Unknown interrupt";
- if (fault_num == INT_GPV)
- snprintf(buf, sizeof(buf), "; GPV_REASON %#lx", reason);
-#ifdef __tilegx__
- else if (fault_num == INT_ILL_TRANS)
- snprintf(buf, sizeof(buf), "; address %#lx", reason);
-#endif
- else
- buf[0] = '\0';
- pr_alert("Kernel took bad trap %d (%s) at PC %#lx%s\n",
- fault_num, name, regs->pc, buf);
- show_regs(regs);
- do_exit(SIGKILL); /* FIXME: implement i386 die() */
- }
-
- switch (fault_num) {
- case INT_MEM_ERROR:
- signo = SIGBUS;
- code = BUS_OBJERR;
- break;
- case INT_ILL:
- if (copy_from_user(&instr, (void __user *)regs->pc,
- sizeof(instr))) {
- pr_err("Unreadable instruction for INT_ILL: %#lx\n",
- regs->pc);
- do_exit(SIGKILL);
- }
- if (!special_ill(instr, &signo, &code)) {
- signo = SIGILL;
- code = ILL_ILLOPC;
- }
- address = regs->pc;
- break;
- case INT_GPV:
-#if CHIP_HAS_TILE_DMA()
- if (retry_gpv(reason))
- return;
-#endif
- /*FALLTHROUGH*/
- case INT_UDN_ACCESS:
- case INT_IDN_ACCESS:
-#if CHIP_HAS_SN()
- case INT_SN_ACCESS:
-#endif
- signo = SIGILL;
- code = ILL_PRVREG;
- address = regs->pc;
- break;
- case INT_SWINT_3:
- case INT_SWINT_2:
- case INT_SWINT_0:
- signo = SIGILL;
- code = ILL_ILLTRP;
- address = regs->pc;
- break;
- case INT_UNALIGN_DATA:
-#ifndef __tilegx__ /* Emulated support for single step debugging */
- if (unaligned_fixup >= 0) {
- struct single_step_state *state =
- current_thread_info()->step_state;
- if (!state ||
- (void __user *)(regs->pc) != state->buffer) {
- single_step_once(regs);
- return;
- }
- }
-#endif
- signo = SIGBUS;
- code = BUS_ADRALN;
- address = 0;
- break;
- case INT_DOUBLE_FAULT:
- /*
- * For double fault, "reason" is actually passed as
- * SYSTEM_SAVE_K_2, the hypervisor's double-fault info, so
- * we can provide the original fault number rather than
- * the uninteresting "INT_DOUBLE_FAULT" so the user can
- * learn what actually struck while PL0 ICS was set.
- */
- fault_num = reason;
- signo = SIGILL;
- code = ILL_DBLFLT;
- address = regs->pc;
- break;
-#ifdef __tilegx__
- case INT_ILL_TRANS: {
- /* Avoid a hardware erratum with the return address stack. */
- fill_ra_stack();
-
- signo = SIGSEGV;
- address = reason;
- code = SEGV_MAPERR;
- break;
- }
-#endif
- default:
- panic("Unexpected do_trap interrupt number %d", fault_num);
- }
-
- info.si_signo = signo;
- info.si_code = code;
- info.si_addr = (void __user *)address;
- if (signo == SIGILL)
- info.si_trapno = fault_num;
- if (signo != SIGTRAP)
- trace_unhandled_signal("trap", regs, address, signo);
- force_sig_info(signo, &info, current);
-}
-
-void do_nmi(struct pt_regs *regs, int fault_num, unsigned long reason)
-{
- nmi_enter();
- switch (reason) {
-#ifdef arch_trigger_cpumask_backtrace
- case TILE_NMI_DUMP_STACK:
- nmi_cpu_backtrace(regs);
- break;
-#endif
- default:
- panic("Unexpected do_nmi type %ld", reason);
- }
- nmi_exit();
-}
-
-/* Deprecated function currently only used here. */
-extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
-
-void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
-{
- _dump_stack(dummy, pc, lr, sp, r52);
- pr_emerg("Double fault: exiting\n");
- machine_halt();
-}
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
deleted file mode 100644
index 77a0b6b6a2a1..000000000000
--- a/arch/tile/kernel/unaligned.c
+++ /dev/null
@@ -1,1603 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * A code-rewriter that handles unaligned exception.
- */
-
-#include <linux/smp.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task.h>
-#include <linux/thread_info.h>
-#include <linux/uaccess.h>
-#include <linux/mman.h>
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/extable.h>
-#include <linux/compat.h>
-#include <linux/prctl.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
-#include <arch/abi.h>
-#include <arch/spr_def.h>
-#include <arch/opcode.h>
-
-
-/*
- * This file handles unaligned exceptions for tile-Gx. Tilepro's unaligned
- * exceptions are handled in single_step.c instead.
- */
-
-int unaligned_printk;
-
-static int __init setup_unaligned_printk(char *str)
-{
- long val;
- if (kstrtol(str, 0, &val) != 0)
- return 0;
- unaligned_printk = val;
- pr_info("Printk for each unaligned data accesses is %s\n",
- unaligned_printk ? "enabled" : "disabled");
- return 1;
-}
-__setup("unaligned_printk=", setup_unaligned_printk);
-
-unsigned int unaligned_fixup_count;
-
-#ifdef __tilegx__
-
-/*
- * Unaligned-data JIT fixup code fragment. Reserved space is 128 bytes.
- * The first 64-bit word saves the fault PC address, the second word is
- * the faulting instruction bundle, followed by 14 JIT bundles.
- */
-
-struct unaligned_jit_fragment {
- unsigned long pc;
- tilegx_bundle_bits bundle;
- tilegx_bundle_bits insn[14];
-};
-
-/*
- * Check if a nop or fnop is at the bundle's X0 pipeline.
- */
-
-static bool is_bundle_x0_nop(tilegx_bundle_bits bundle)
-{
- return (((get_UnaryOpcodeExtension_X0(bundle) ==
- NOP_UNARY_OPCODE_X0) &&
- (get_RRROpcodeExtension_X0(bundle) ==
- UNARY_RRR_0_OPCODE_X0) &&
- (get_Opcode_X0(bundle) ==
- RRR_0_OPCODE_X0)) ||
- ((get_UnaryOpcodeExtension_X0(bundle) ==
- FNOP_UNARY_OPCODE_X0) &&
- (get_RRROpcodeExtension_X0(bundle) ==
- UNARY_RRR_0_OPCODE_X0) &&
- (get_Opcode_X0(bundle) ==
- RRR_0_OPCODE_X0)));
-}
-
-/*
- * Check if a nop or fnop is at the bundle's X1 pipeline.
- */
-
-static bool is_bundle_x1_nop(tilegx_bundle_bits bundle)
-{
- return (((get_UnaryOpcodeExtension_X1(bundle) ==
- NOP_UNARY_OPCODE_X1) &&
- (get_RRROpcodeExtension_X1(bundle) ==
- UNARY_RRR_0_OPCODE_X1) &&
- (get_Opcode_X1(bundle) ==
- RRR_0_OPCODE_X1)) ||
- ((get_UnaryOpcodeExtension_X1(bundle) ==
- FNOP_UNARY_OPCODE_X1) &&
- (get_RRROpcodeExtension_X1(bundle) ==
- UNARY_RRR_0_OPCODE_X1) &&
- (get_Opcode_X1(bundle) ==
- RRR_0_OPCODE_X1)));
-}
-
-/*
- * Check if a nop or fnop is at the bundle's Y0 pipeline.
- */
-
-static bool is_bundle_y0_nop(tilegx_bundle_bits bundle)
-{
- return (((get_UnaryOpcodeExtension_Y0(bundle) ==
- NOP_UNARY_OPCODE_Y0) &&
- (get_RRROpcodeExtension_Y0(bundle) ==
- UNARY_RRR_1_OPCODE_Y0) &&
- (get_Opcode_Y0(bundle) ==
- RRR_1_OPCODE_Y0)) ||
- ((get_UnaryOpcodeExtension_Y0(bundle) ==
- FNOP_UNARY_OPCODE_Y0) &&
- (get_RRROpcodeExtension_Y0(bundle) ==
- UNARY_RRR_1_OPCODE_Y0) &&
- (get_Opcode_Y0(bundle) ==
- RRR_1_OPCODE_Y0)));
-}
-
-/*
- * Check if a nop or fnop is at the bundle's Y1 pipeline.
- */
-
-static bool is_bundle_y1_nop(tilegx_bundle_bits bundle)
-{
- return (((get_UnaryOpcodeExtension_Y1(bundle) ==
- NOP_UNARY_OPCODE_Y1) &&
- (get_RRROpcodeExtension_Y1(bundle) ==
- UNARY_RRR_1_OPCODE_Y1) &&
- (get_Opcode_Y1(bundle) ==
- RRR_1_OPCODE_Y1)) ||
- ((get_UnaryOpcodeExtension_Y1(bundle) ==
- FNOP_UNARY_OPCODE_Y1) &&
- (get_RRROpcodeExtension_Y1(bundle) ==
- UNARY_RRR_1_OPCODE_Y1) &&
- (get_Opcode_Y1(bundle) ==
- RRR_1_OPCODE_Y1)));
-}
-
-/*
- * Test if a bundle's y0 and y1 pipelines are both nop or fnop.
- */
-
-static bool is_y0_y1_nop(tilegx_bundle_bits bundle)
-{
- return is_bundle_y0_nop(bundle) && is_bundle_y1_nop(bundle);
-}
-
-/*
- * Test if a bundle's x0 and x1 pipelines are both nop or fnop.
- */
-
-static bool is_x0_x1_nop(tilegx_bundle_bits bundle)
-{
- return is_bundle_x0_nop(bundle) && is_bundle_x1_nop(bundle);
-}
-
-/*
- * Find the destination and source registers of the faulting unaligned
- * access instruction at X1 or Y2. Also allocate up to 3 scratch registers
- * (clob1, clob2 and clob3), which are guaranteed to differ from any
- * register used in the fault bundle. r_alias reports whether any
- * instruction other than the unaligned load/store shares a register
- * with ra, rb or rd.
- */
-
-static void find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra,
- uint64_t *rb, uint64_t *clob1, uint64_t *clob2,
- uint64_t *clob3, bool *r_alias)
-{
- int i;
- uint64_t reg;
- uint64_t reg_map = 0, alias_reg_map = 0, map;
- bool alias = false;
-
- /*
-	 * Parse the fault bundle, find the potentially used registers and
-	 * mark the corresponding bits in reg_map and alias_reg_map. These
-	 * two bitmaps are used to find the scratch registers and determine
-	 * whether there is a register alias.
- */
- if (bundle & TILEGX_BUNDLE_MODE_MASK) { /* Y Mode Bundle. */
-
- reg = get_SrcA_Y2(bundle);
- reg_map |= 1ULL << reg;
- *ra = reg;
- reg = get_SrcBDest_Y2(bundle);
- reg_map |= 1ULL << reg;
-
- if (rd) {
- /* Load. */
- *rd = reg;
- alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
- } else {
- /* Store. */
- *rb = reg;
- alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
- }
-
- if (!is_bundle_y1_nop(bundle)) {
- reg = get_SrcA_Y1(bundle);
- reg_map |= (1ULL << reg);
- map = (1ULL << reg);
-
- reg = get_SrcB_Y1(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- reg = get_Dest_Y1(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- if (map & alias_reg_map)
- alias = true;
- }
-
- if (!is_bundle_y0_nop(bundle)) {
- reg = get_SrcA_Y0(bundle);
- reg_map |= (1ULL << reg);
- map = (1ULL << reg);
-
- reg = get_SrcB_Y0(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- reg = get_Dest_Y0(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- if (map & alias_reg_map)
- alias = true;
- }
- } else { /* X Mode Bundle. */
-
- reg = get_SrcA_X1(bundle);
- reg_map |= (1ULL << reg);
- *ra = reg;
- if (rd) {
- /* Load. */
- reg = get_Dest_X1(bundle);
- reg_map |= (1ULL << reg);
- *rd = reg;
- alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
- } else {
- /* Store. */
- reg = get_SrcB_X1(bundle);
- reg_map |= (1ULL << reg);
- *rb = reg;
- alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
- }
-
- if (!is_bundle_x0_nop(bundle)) {
- reg = get_SrcA_X0(bundle);
- reg_map |= (1ULL << reg);
- map = (1ULL << reg);
-
- reg = get_SrcB_X0(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- reg = get_Dest_X0(bundle);
- reg_map |= (1ULL << reg);
- map |= (1ULL << reg);
-
- if (map & alias_reg_map)
- alias = true;
- }
- }
-
- /*
- * "alias" indicates if the unalign access registers have collision
- * with others in the same bundle. We jsut simply test all register
- * operands case (RRR), ignored the case with immidate. If a bundle
- * has no register alias, we may do fixup in a simple or fast manner.
- * So if an immidata field happens to hit with a register, we may end
- * up fall back to the generic handling.
- */
-
- *r_alias = alias;
-
- /* Flip bits on reg_map. */
- reg_map ^= -1ULL;
-
-	/* Scan reg_map's lower 54 (TREG_SP) bits to find 3 set bits. */
- for (i = 0; i < TREG_SP; i++) {
- if (reg_map & (0x1ULL << i)) {
- if (*clob1 == -1) {
- *clob1 = i;
- } else if (*clob2 == -1) {
- *clob2 = i;
- } else if (*clob3 == -1) {
- *clob3 = i;
- return;
- }
- }
- }
-}
-
-/*
- * Sanity check for registers ra, rb, rd and clob1/2/3. Return true if any
- * of them is unexpected.
- */
-
-static bool check_regs(uint64_t rd, uint64_t ra, uint64_t rb,
- uint64_t clob1, uint64_t clob2, uint64_t clob3)
-{
- bool unexpected = false;
- if ((ra >= 56) && (ra != TREG_ZERO))
- unexpected = true;
-
- if ((clob1 >= 56) || (clob2 >= 56) || (clob3 >= 56))
- unexpected = true;
-
- if (rd != -1) {
- if ((rd >= 56) && (rd != TREG_ZERO))
- unexpected = true;
- } else {
- if ((rb >= 56) && (rb != TREG_ZERO))
- unexpected = true;
- }
- return unexpected;
-}
-
-
-#define GX_INSN_X0_MASK ((1ULL << 31) - 1)
-#define GX_INSN_X1_MASK (((1ULL << 31) - 1) << 31)
-#define GX_INSN_Y0_MASK ((0xFULL << 27) | (0xFFFFFULL))
-#define GX_INSN_Y1_MASK (GX_INSN_Y0_MASK << 31)
-#define GX_INSN_Y2_MASK ((0x7FULL << 51) | (0x7FULL << 20))
-
-#ifdef __LITTLE_ENDIAN
-#define GX_INSN_BSWAP(_bundle_) (_bundle_)
-#else
-#define GX_INSN_BSWAP(_bundle_) swab64(_bundle_)
-#endif /* __LITTLE_ENDIAN */
-
-/*
- * __JIT_CODE(.) creates template bundles in the .rodata.unalign_data
- * section. The corresponding static function jit_x#_###(.) generates a
- * partial or whole bundle based on the template and the given arguments.
- */
-
-#define __JIT_CODE(_X_) \
- asm (".pushsection .rodata.unalign_data, \"a\"\n" \
- _X_"\n" \
- ".popsection\n")
-
-__JIT_CODE("__unalign_jit_x1_mtspr: {mtspr 0, r0}");
-static tilegx_bundle_bits jit_x1_mtspr(int spr, int reg)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_mtspr;
- return (GX_INSN_BSWAP(__unalign_jit_x1_mtspr) & GX_INSN_X1_MASK) |
- create_MT_Imm14_X1(spr) | create_SrcA_X1(reg);
-}
-
-__JIT_CODE("__unalign_jit_x1_mfspr: {mfspr r0, 0}");
-static tilegx_bundle_bits jit_x1_mfspr(int reg, int spr)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_mfspr;
- return (GX_INSN_BSWAP(__unalign_jit_x1_mfspr) & GX_INSN_X1_MASK) |
- create_MF_Imm14_X1(spr) | create_Dest_X1(reg);
-}
-
-__JIT_CODE("__unalign_jit_x0_addi: {addi r0, r0, 0; iret}");
-static tilegx_bundle_bits jit_x0_addi(int rd, int ra, int imm8)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_addi;
- return (GX_INSN_BSWAP(__unalign_jit_x0_addi) & GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_Imm8_X0(imm8);
-}
-
-__JIT_CODE("__unalign_jit_x1_ldna: {ldna r0, r0}");
-static tilegx_bundle_bits jit_x1_ldna(int rd, int ra)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_ldna;
- return (GX_INSN_BSWAP(__unalign_jit_x1_ldna) & GX_INSN_X1_MASK) |
- create_Dest_X1(rd) | create_SrcA_X1(ra);
-}
-
-__JIT_CODE("__unalign_jit_x0_dblalign: {dblalign r0, r0 ,r0}");
-static tilegx_bundle_bits jit_x0_dblalign(int rd, int ra, int rb)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_dblalign;
- return (GX_INSN_BSWAP(__unalign_jit_x0_dblalign) & GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_SrcB_X0(rb);
-}
-
-__JIT_CODE("__unalign_jit_x1_iret: {iret}");
-static tilegx_bundle_bits jit_x1_iret(void)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_iret;
- return GX_INSN_BSWAP(__unalign_jit_x1_iret) & GX_INSN_X1_MASK;
-}
-
-__JIT_CODE("__unalign_jit_x01_fnop: {fnop;fnop}");
-static tilegx_bundle_bits jit_x0_fnop(void)
-{
- extern tilegx_bundle_bits __unalign_jit_x01_fnop;
- return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X0_MASK;
-}
-
-static tilegx_bundle_bits jit_x1_fnop(void)
-{
- extern tilegx_bundle_bits __unalign_jit_x01_fnop;
- return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X1_MASK;
-}
-
-__JIT_CODE("__unalign_jit_y2_dummy: {fnop; fnop; ld zero, sp}");
-static tilegx_bundle_bits jit_y2_dummy(void)
-{
- extern tilegx_bundle_bits __unalign_jit_y2_dummy;
- return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y2_MASK;
-}
-
-static tilegx_bundle_bits jit_y1_fnop(void)
-{
- extern tilegx_bundle_bits __unalign_jit_y2_dummy;
- return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y1_MASK;
-}
-
-__JIT_CODE("__unalign_jit_x1_st1_add: {st1_add r1, r0, 0}");
-static tilegx_bundle_bits jit_x1_st1_add(int ra, int rb, int imm8)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_st1_add;
- return (GX_INSN_BSWAP(__unalign_jit_x1_st1_add) &
- (~create_SrcA_X1(-1)) &
- GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
- create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
-}
-
-__JIT_CODE("__unalign_jit_x1_st: {crc32_8 r1, r0, r0; st r0, r0}");
-static tilegx_bundle_bits jit_x1_st(int ra, int rb)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_st;
- return (GX_INSN_BSWAP(__unalign_jit_x1_st) & GX_INSN_X1_MASK) |
- create_SrcA_X1(ra) | create_SrcB_X1(rb);
-}
-
-__JIT_CODE("__unalign_jit_x1_st_add: {st_add r1, r0, 0}");
-static tilegx_bundle_bits jit_x1_st_add(int ra, int rb, int imm8)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_st_add;
- return (GX_INSN_BSWAP(__unalign_jit_x1_st_add) &
- (~create_SrcA_X1(-1)) &
- GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
- create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
-}
-
-__JIT_CODE("__unalign_jit_x1_ld: {crc32_8 r1, r0, r0; ld r0, r0}");
-static tilegx_bundle_bits jit_x1_ld(int rd, int ra)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_ld;
- return (GX_INSN_BSWAP(__unalign_jit_x1_ld) & GX_INSN_X1_MASK) |
- create_Dest_X1(rd) | create_SrcA_X1(ra);
-}
-
-__JIT_CODE("__unalign_jit_x1_ld_add: {ld_add r1, r0, 0}");
-static tilegx_bundle_bits jit_x1_ld_add(int rd, int ra, int imm8)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_ld_add;
- return (GX_INSN_BSWAP(__unalign_jit_x1_ld_add) &
- (~create_Dest_X1(-1)) &
- GX_INSN_X1_MASK) | create_Dest_X1(rd) |
- create_SrcA_X1(ra) | create_Imm8_X1(imm8);
-}
-
-__JIT_CODE("__unalign_jit_x0_bfexts: {bfexts r0, r0, 0, 0}");
-static tilegx_bundle_bits jit_x0_bfexts(int rd, int ra, int bfs, int bfe)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_bfexts;
- return (GX_INSN_BSWAP(__unalign_jit_x0_bfexts) &
- GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
-}
-
-__JIT_CODE("__unalign_jit_x0_bfextu: {bfextu r0, r0, 0, 0}");
-static tilegx_bundle_bits jit_x0_bfextu(int rd, int ra, int bfs, int bfe)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_bfextu;
- return (GX_INSN_BSWAP(__unalign_jit_x0_bfextu) &
- GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
-}
-
-__JIT_CODE("__unalign_jit_x1_addi: {bfextu r1, r1, 0, 0; addi r0, r0, 0}");
-static tilegx_bundle_bits jit_x1_addi(int rd, int ra, int imm8)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_addi;
- return (GX_INSN_BSWAP(__unalign_jit_x1_addi) & GX_INSN_X1_MASK) |
- create_Dest_X1(rd) | create_SrcA_X1(ra) |
- create_Imm8_X1(imm8);
-}
-
-__JIT_CODE("__unalign_jit_x0_shrui: {shrui r0, r0, 0; iret}");
-static tilegx_bundle_bits jit_x0_shrui(int rd, int ra, int imm6)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_shrui;
- return (GX_INSN_BSWAP(__unalign_jit_x0_shrui) &
- GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_ShAmt_X0(imm6);
-}
-
-__JIT_CODE("__unalign_jit_x0_rotli: {rotli r0, r0, 0; iret}");
-static tilegx_bundle_bits jit_x0_rotli(int rd, int ra, int imm6)
-{
- extern tilegx_bundle_bits __unalign_jit_x0_rotli;
- return (GX_INSN_BSWAP(__unalign_jit_x0_rotli) &
- GX_INSN_X0_MASK) |
- create_Dest_X0(rd) | create_SrcA_X0(ra) |
- create_ShAmt_X0(imm6);
-}
-
-__JIT_CODE("__unalign_jit_x1_bnezt: {bnezt r0, __unalign_jit_x1_bnezt}");
-static tilegx_bundle_bits jit_x1_bnezt(int ra, int broff)
-{
- extern tilegx_bundle_bits __unalign_jit_x1_bnezt;
- return (GX_INSN_BSWAP(__unalign_jit_x1_bnezt) &
- GX_INSN_X1_MASK) |
- create_SrcA_X1(ra) | create_BrOff_X1(broff);
-}
-
-#undef __JIT_CODE
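
Each helper above masks its template down to a single pipeline's bits and ORs in freshly created operand fields, so whole bundles are composed by ORing one fragment per pipeline. An illustrative composition with hypothetical register numbers:

	/* X-mode bundle "{ addi r10, r11, 8 ; fnop }" */
	tilegx_bundle_bits b = jit_x0_addi(10, 11, 8) | jit_x1_fnop();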
-
-/*
- * This function generates the unaligned-access fixup JIT.
- *
- * We first find the unaligned load/store instruction's destination and
- * source registers (ra, rb and rd) plus 3 scratch registers by calling
- * find_regs(...). The 3 scratch clobbers must not alias any register
- * used in the fault bundle. We then analyze the fault bundle to determine
- * whether it is a load or store, its operand width, and whether it does
- * a branch or an address increment, etc. Finally, the generated JIT is
- * copied into the JIT code area in user space.
- */
-
-static
-void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
- int align_ctl)
-{
- struct thread_info *info = current_thread_info();
- struct unaligned_jit_fragment frag;
- struct unaligned_jit_fragment *jit_code_area;
- tilegx_bundle_bits bundle_2 = 0;
-	/* If bundle_2_enable == false, bundle_2 is an fnop/nop operation. */
- bool bundle_2_enable = true;
- uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1;
- /*
-	 * Indicates whether the unaligned access
-	 * instruction's registers collide with
-	 * others in the same bundle.
- */
- bool alias = false;
- bool load_n_store = true;
- bool load_store_signed = false;
- unsigned int load_store_size = 8;
- bool y1_br = false; /* True, for a branch in same bundle at Y1.*/
- int y1_br_reg = 0;
- /* True for link operation. i.e. jalr or lnk at Y1 */
- bool y1_lr = false;
- int y1_lr_reg = 0;
- bool x1_add = false;/* True, for load/store ADD instruction at X1*/
- int x1_add_imm8 = 0;
- bool unexpected = false;
- int n = 0, k;
-
- jit_code_area =
- (struct unaligned_jit_fragment *)(info->unalign_jit_base);
-
- memset((void *)&frag, 0, sizeof(frag));
-
- /* 0: X mode, Otherwise: Y mode. */
- if (bundle & TILEGX_BUNDLE_MODE_MASK) {
- unsigned int mod, opcode;
-
- if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
- get_RRROpcodeExtension_Y1(bundle) ==
- UNARY_RRR_1_OPCODE_Y1) {
-
- opcode = get_UnaryOpcodeExtension_Y1(bundle);
-
- /*
- * Test "jalr", "jalrp", "jr", "jrp" instruction at Y1
- * pipeline.
- */
- switch (opcode) {
- case JALR_UNARY_OPCODE_Y1:
- case JALRP_UNARY_OPCODE_Y1:
- y1_lr = true;
- y1_lr_reg = 55; /* Link register. */
- /* FALLTHROUGH */
- case JR_UNARY_OPCODE_Y1:
- case JRP_UNARY_OPCODE_Y1:
- y1_br = true;
- y1_br_reg = get_SrcA_Y1(bundle);
- break;
- case LNK_UNARY_OPCODE_Y1:
- /* "lnk" at Y1 pipeline. */
- y1_lr = true;
- y1_lr_reg = get_Dest_Y1(bundle);
- break;
- }
- }
-
- opcode = get_Opcode_Y2(bundle);
- mod = get_Mode(bundle);
-
-		/*
-		 * bundle_2 is the bundle after replacing Y2 with a dummy
-		 * operation: "ld zero, sp".
-		 */
- bundle_2 = (bundle & (~GX_INSN_Y2_MASK)) | jit_y2_dummy();
-
-		/* Make Y1 an fnop if Y1 is a branch or lnk operation. */
- if (y1_br || y1_lr) {
- bundle_2 &= ~(GX_INSN_Y1_MASK);
- bundle_2 |= jit_y1_fnop();
- }
-
- if (is_y0_y1_nop(bundle_2))
- bundle_2_enable = false;
-
- if (mod == MODE_OPCODE_YC2) {
- /* Store. */
- load_n_store = false;
- load_store_size = 1 << opcode;
- load_store_signed = false;
- find_regs(bundle, 0, &ra, &rb, &clob1, &clob2,
- &clob3, &alias);
- if (load_store_size > 8)
- unexpected = true;
- } else {
- /* Load. */
- load_n_store = true;
- if (mod == MODE_OPCODE_YB2) {
- switch (opcode) {
- case LD_OPCODE_Y2:
- load_store_signed = false;
- load_store_size = 8;
- break;
- case LD4S_OPCODE_Y2:
- load_store_signed = true;
- load_store_size = 4;
- break;
- case LD4U_OPCODE_Y2:
- load_store_signed = false;
- load_store_size = 4;
- break;
- default:
- unexpected = true;
- }
- } else if (mod == MODE_OPCODE_YA2) {
- if (opcode == LD2S_OPCODE_Y2) {
- load_store_signed = true;
- load_store_size = 2;
- } else if (opcode == LD2U_OPCODE_Y2) {
- load_store_signed = false;
- load_store_size = 2;
- } else
- unexpected = true;
- } else
- unexpected = true;
- find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2,
- &clob3, &alias);
- }
- } else {
- unsigned int opcode;
-
-		/* bundle_2 is the bundle after replacing X1 with an "fnop". */
- bundle_2 = (bundle & (~GX_INSN_X1_MASK)) | jit_x1_fnop();
-
- if (is_x0_x1_nop(bundle_2))
- bundle_2_enable = false;
-
- if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
- opcode = get_UnaryOpcodeExtension_X1(bundle);
-
- if (get_RRROpcodeExtension_X1(bundle) ==
- UNARY_RRR_0_OPCODE_X1) {
- load_n_store = true;
- find_regs(bundle, &rd, &ra, &rb, &clob1,
- &clob2, &clob3, &alias);
-
- switch (opcode) {
- case LD_UNARY_OPCODE_X1:
- load_store_signed = false;
- load_store_size = 8;
- break;
- case LD4S_UNARY_OPCODE_X1:
- load_store_signed = true;
- /* FALLTHROUGH */
- case LD4U_UNARY_OPCODE_X1:
- load_store_size = 4;
- break;
-
- case LD2S_UNARY_OPCODE_X1:
- load_store_signed = true;
- /* FALLTHROUGH */
- case LD2U_UNARY_OPCODE_X1:
- load_store_size = 2;
- break;
- default:
- unexpected = true;
- }
- } else {
- load_n_store = false;
- load_store_signed = false;
- find_regs(bundle, 0, &ra, &rb,
- &clob1, &clob2, &clob3,
- &alias);
-
- opcode = get_RRROpcodeExtension_X1(bundle);
- switch (opcode) {
- case ST_RRR_0_OPCODE_X1:
- load_store_size = 8;
- break;
- case ST4_RRR_0_OPCODE_X1:
- load_store_size = 4;
- break;
- case ST2_RRR_0_OPCODE_X1:
- load_store_size = 2;
- break;
- default:
- unexpected = true;
- }
- }
- } else if (get_Opcode_X1(bundle) == IMM8_OPCODE_X1) {
- load_n_store = true;
- opcode = get_Imm8OpcodeExtension_X1(bundle);
- switch (opcode) {
- case LD_ADD_IMM8_OPCODE_X1:
- load_store_size = 8;
- break;
-
- case LD4S_ADD_IMM8_OPCODE_X1:
- load_store_signed = true;
- /* FALLTHROUGH */
- case LD4U_ADD_IMM8_OPCODE_X1:
- load_store_size = 4;
- break;
-
- case LD2S_ADD_IMM8_OPCODE_X1:
- load_store_signed = true;
- /* FALLTHROUGH */
- case LD2U_ADD_IMM8_OPCODE_X1:
- load_store_size = 2;
- break;
-
- case ST_ADD_IMM8_OPCODE_X1:
- load_n_store = false;
- load_store_size = 8;
- break;
- case ST4_ADD_IMM8_OPCODE_X1:
- load_n_store = false;
- load_store_size = 4;
- break;
- case ST2_ADD_IMM8_OPCODE_X1:
- load_n_store = false;
- load_store_size = 2;
- break;
- default:
- unexpected = true;
- }
-
- if (!unexpected) {
- x1_add = true;
- if (load_n_store)
- x1_add_imm8 = get_Imm8_X1(bundle);
- else
- x1_add_imm8 = get_Dest_Imm8_X1(bundle);
- }
-
- find_regs(bundle, load_n_store ? (&rd) : NULL,
- &ra, &rb, &clob1, &clob2, &clob3, &alias);
- } else
- unexpected = true;
- }
-
-	/*
-	 * Sanity-check the register numbers extracted from the fault bundle.
-	 */
-	if (check_regs(rd, ra, rb, clob1, clob2, clob3))
-		unexpected = true;
-
-	/* Warn if register ra in fact holds an aligned address. */
- if (!unexpected)
- WARN_ON(!((load_store_size - 1) & (regs->regs[ra])));
-
-
-	/*
-	 * If the fault came from kernel space, we only need to take care of
-	 * the unaligned "get_user/put_user" macros defined in "uaccess.h".
-	 * Basically, we will handle a bundle like this:
-	 * {ld/2u/4s rd, ra; movei rx, 0} or {st/2/4 ra, rb; movei rx, 0}
-	 * (refer to "arch/tile/include/asm/uaccess.h" for details).
-	 * For either a load or a store, a byte-wise operation is performed by
-	 * calling get_user() or put_user(). If the macro returns a non-zero
-	 * value, that value is written to rx; otherwise rx is set to zero.
-	 * Finally, make pc point to the next bundle and return.
-	 */
-
- if (EX1_PL(regs->ex1) != USER_PL) {
-
- unsigned long rx = 0;
- unsigned long x = 0, ret = 0;
-
- if (y1_br || y1_lr || x1_add ||
- (load_store_signed !=
- (load_n_store && load_store_size == 4))) {
-			/* A branch, link, load/store add, or wrong sign-extension is unexpected here. */
- unexpected = true;
- } else if (!unexpected) {
- if (bundle & TILEGX_BUNDLE_MODE_MASK) {
-				/*
-				 * The fault bundle is in Y mode.
-				 * Check if Y1 and Y0 have the form
-				 * { movei rx, 0; nop/fnop }; if so,
-				 * find rx.
-				 */
-
- if ((get_Opcode_Y1(bundle) == ADDI_OPCODE_Y1)
- && (get_SrcA_Y1(bundle) == TREG_ZERO) &&
- (get_Imm8_Y1(bundle) == 0) &&
- is_bundle_y0_nop(bundle)) {
- rx = get_Dest_Y1(bundle);
- } else if ((get_Opcode_Y0(bundle) ==
- ADDI_OPCODE_Y0) &&
- (get_SrcA_Y0(bundle) == TREG_ZERO) &&
- (get_Imm8_Y0(bundle) == 0) &&
- is_bundle_y1_nop(bundle)) {
- rx = get_Dest_Y0(bundle);
- } else {
- unexpected = true;
- }
- } else {
-				/*
-				 * The fault bundle is in X mode.
-				 * Check if X0 is 'movei rx, 0';
-				 * if so, find rx.
-				 */
-
- if ((get_Opcode_X0(bundle) == IMM8_OPCODE_X0)
- && (get_Imm8OpcodeExtension_X0(bundle) ==
- ADDI_IMM8_OPCODE_X0) &&
- (get_SrcA_X0(bundle) == TREG_ZERO) &&
- (get_Imm8_X0(bundle) == 0)) {
- rx = get_Dest_X0(bundle);
- } else {
- unexpected = true;
- }
- }
-
- /* rx should be less than 56. */
- if (!unexpected && (rx >= 56))
- unexpected = true;
- }
-
- if (!search_exception_tables(regs->pc)) {
- /* No fixup in the exception tables for the pc. */
- unexpected = true;
- }
-
- if (unexpected) {
- /* Unexpected unalign kernel fault. */
- struct task_struct *tsk = validate_current();
-
- bust_spinlocks(1);
-
- show_regs(regs);
-
- if (unlikely(tsk->pid < 2)) {
- panic("Kernel unalign fault running %s!",
- tsk->pid ? "init" : "the idle task");
- }
-#ifdef SUPPORT_DIE
- die("Oops", regs);
-#endif
-			bust_spinlocks(0);
-
- do_group_exit(SIGKILL);
-
- } else {
- unsigned long i, b = 0;
- unsigned char *ptr =
- (unsigned char *)regs->regs[ra];
- if (load_n_store) {
-				/* Handle get_user(x, ptr). */
- for (i = 0; i < load_store_size; i++) {
- ret = get_user(b, ptr++);
- if (!ret) {
-						/* Success! Update x. */
-#ifdef __LITTLE_ENDIAN
- x |= (b << (8 * i));
-#else
- x <<= 8;
- x |= b;
-#endif /* __LITTLE_ENDIAN */
- } else {
- x = 0;
- break;
- }
- }
-
- /* Sign-extend 4-byte loads. */
- if (load_store_size == 4)
- x = (long)(int)x;
-
- /* Set register rd. */
- regs->regs[rd] = x;
-
- /* Set register rx. */
- regs->regs[rx] = ret;
-
- /* Bump pc. */
- regs->pc += 8;
-
- } else {
- /* Handle put_user(x, ptr) */
- x = regs->regs[rb];
-#ifdef __LITTLE_ENDIAN
- b = x;
-#else
-				/*
-				 * Swap x so that it is stored from low
-				 * to high memory, the same as in the
-				 * little-endian case.
-				 */
- switch (load_store_size) {
- case 8:
- b = swab64(x);
- break;
- case 4:
- b = swab32(x);
- break;
- case 2:
- b = swab16(x);
- break;
- }
-#endif /* __LITTLE_ENDIAN */
- for (i = 0; i < load_store_size; i++) {
- ret = put_user(b, ptr++);
- if (ret)
- break;
-					/* Success! Shift by one byte. */
- b >>= 8;
- }
- /* Set register rx. */
- regs->regs[rx] = ret;
-
- /* Bump pc. */
- regs->pc += 8;
- }
- }
-
- unaligned_fixup_count++;
-
- if (unaligned_printk) {
- pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n",
- current->comm, current->pid, regs->regs[ra]);
- }
-
- /* Done! Return to the exception handler. */
- return;
- }
-
- if ((align_ctl == 0) || unexpected) {
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_code = BUS_ADRALN;
- info.si_addr = (unsigned char __user *)0;
-
- if (unaligned_printk)
- pr_info("Unalign bundle: unexp @%llx, %llx\n",
- (unsigned long long)regs->pc,
- (unsigned long long)bundle);
-
- if (ra < 56) {
- unsigned long uaa = (unsigned long)regs->regs[ra];
-			/* Set the bus address. */
- info.si_addr = (unsigned char __user *)uaa;
- }
-
- unaligned_fixup_count++;
-
- trace_unhandled_signal("unaligned fixup trap", regs,
- (unsigned long)info.si_addr, SIGBUS);
- force_sig_info(info.si_signo, &info, current);
- return;
- }
-
-#ifdef __LITTLE_ENDIAN
-#define UA_FIXUP_ADDR_DELTA 1
-#define UA_FIXUP_BFEXT_START(_B_) 0
-#define UA_FIXUP_BFEXT_END(_B_) (8 * (_B_) - 1)
-#else /* __BIG_ENDIAN */
-#define UA_FIXUP_ADDR_DELTA -1
-#define UA_FIXUP_BFEXT_START(_B_) (64 - 8 * (_B_))
-#define UA_FIXUP_BFEXT_END(_B_) 63
-#endif /* __LITTLE_ENDIAN */
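-/*
- * For example, a 4-byte little-endian load is narrowed with
- * "bfextu/bfexts rd, rd, 0, 31" (the low 32 bits); on big endian the
- * field is bits 32..63 of the doubleword instead, as encoded by the
- * macros above.
- */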
-
-
-
- if ((ra != rb) && (rd != TREG_SP) && !alias &&
- !y1_br && !y1_lr && !x1_add) {
-		/*
-		 * Simple case: ra != rb, no register alias found, and no
-		 * branch or link. This will be the majority of cases. We
-		 * can do a little better here than with the generic scheme
-		 * below.
-		 */
- if (!load_n_store) {
-			/*
-			 * Simple store: ra != rb, no need for a scratch register.
-			 * Just store and rotate right, one byte at a time.
-			 */
-#ifdef __BIG_ENDIAN
- frag.insn[n++] =
- jit_x0_addi(ra, ra, load_store_size - 1) |
- jit_x1_fnop();
-#endif /* __BIG_ENDIAN */
- for (k = 0; k < load_store_size; k++) {
- /* Store a byte. */
- frag.insn[n++] =
- jit_x0_rotli(rb, rb, 56) |
- jit_x1_st1_add(ra, rb,
- UA_FIXUP_ADDR_DELTA);
- }
-#ifdef __BIG_ENDIAN
- frag.insn[n] = jit_x1_addi(ra, ra, 1);
-#else
- frag.insn[n] = jit_x1_addi(ra, ra,
- -1 * load_store_size);
-#endif /* __LITTLE_ENDIAN */
-
- if (load_store_size == 8) {
- frag.insn[n] |= jit_x0_fnop();
- } else if (load_store_size == 4) {
- frag.insn[n] |= jit_x0_rotli(rb, rb, 32);
- } else { /* = 2 */
- frag.insn[n] |= jit_x0_rotli(rb, rb, 16);
- }
- n++;
- if (bundle_2_enable)
- frag.insn[n++] = bundle_2;
- frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
- } else {
- if (rd == ra) {
- /* Use two clobber registers: clob1/2. */
- frag.insn[n++] =
- jit_x0_addi(TREG_SP, TREG_SP, -16) |
- jit_x1_fnop();
- frag.insn[n++] =
- jit_x0_addi(clob1, ra, 7) |
- jit_x1_st_add(TREG_SP, clob1, -8);
- frag.insn[n++] =
- jit_x0_addi(clob2, ra, 0) |
- jit_x1_st(TREG_SP, clob2);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ldna(rd, ra);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ldna(clob1, clob1);
-				/*
-				 * Note: we rely on rd not being sp.
-				 * Recover clob1/2 from the stack.
-				 */
- frag.insn[n++] =
- jit_x0_dblalign(rd, clob1, clob2) |
- jit_x1_ld_add(clob2, TREG_SP, 8);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ld_add(clob1, TREG_SP, 16);
- } else {
- /* Use one clobber register: clob1 only. */
- frag.insn[n++] =
- jit_x0_addi(TREG_SP, TREG_SP, -16) |
- jit_x1_fnop();
- frag.insn[n++] =
- jit_x0_addi(clob1, ra, 7) |
- jit_x1_st(TREG_SP, clob1);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ldna(rd, ra);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ldna(clob1, clob1);
-				/*
-				 * Note: we rely on rd not being sp.
-				 * Recover clob1 from the stack.
-				 */
- frag.insn[n++] =
- jit_x0_dblalign(rd, clob1, ra) |
- jit_x1_ld_add(clob1, TREG_SP, 16);
- }
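-			/*
-			 * In both cases above, "ldna" loads the aligned
-			 * doubleword containing its address (the low 3 bits
-			 * are ignored), so the two loads at ra and ra+7
-			 * fetch the aligned words covering the unaligned
-			 * access; "dblalign" then funnel-shifts the pair by
-			 * the low bits of the address to produce the
-			 * unaligned value in rd.
-			 */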
-
- if (bundle_2_enable)
- frag.insn[n++] = bundle_2;
-			/*
-			 * For a non-8-byte load, extract the relevant bytes
-			 * and sign- or zero-extend.
-			 */
- if (load_store_size == 4) {
- if (load_store_signed)
- frag.insn[n++] =
- jit_x0_bfexts(
- rd, rd,
- UA_FIXUP_BFEXT_START(4),
- UA_FIXUP_BFEXT_END(4)) |
- jit_x1_fnop();
- else
- frag.insn[n++] =
- jit_x0_bfextu(
- rd, rd,
- UA_FIXUP_BFEXT_START(4),
- UA_FIXUP_BFEXT_END(4)) |
- jit_x1_fnop();
- } else if (load_store_size == 2) {
- if (load_store_signed)
- frag.insn[n++] =
- jit_x0_bfexts(
- rd, rd,
- UA_FIXUP_BFEXT_START(2),
- UA_FIXUP_BFEXT_END(2)) |
- jit_x1_fnop();
- else
- frag.insn[n++] =
- jit_x0_bfextu(
- rd, rd,
- UA_FIXUP_BFEXT_START(2),
- UA_FIXUP_BFEXT_END(2)) |
- jit_x1_fnop();
- }
-
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_iret();
- }
- } else if (!load_n_store) {
-
-		/*
-		 * Generic memory store cases: use 3 clobber registers.
-		 *
-		 * Allocate space for saving clob2, clob1 and clob3 on the
-		 * user's stack. Register clob3 points to where clob2 is
-		 * saved, followed by clob1 and clob3 from high to low memory.
-		 */
- frag.insn[n++] =
- jit_x0_addi(TREG_SP, TREG_SP, -32) |
- jit_x1_fnop();
- frag.insn[n++] =
- jit_x0_addi(clob3, TREG_SP, 16) |
- jit_x1_st_add(TREG_SP, clob3, 8);
-#ifdef __LITTLE_ENDIAN
- frag.insn[n++] =
- jit_x0_addi(clob1, ra, 0) |
- jit_x1_st_add(TREG_SP, clob1, 8);
-#else
- frag.insn[n++] =
- jit_x0_addi(clob1, ra, load_store_size - 1) |
- jit_x1_st_add(TREG_SP, clob1, 8);
-#endif
- if (load_store_size == 8) {
-		/*
-		 * We store one byte at a time, not for speed but for
-		 * compact code. After each store, the data source register
-		 * is rotated by one byte, so it is unchanged after all 8
-		 * stores.
-		 */
- frag.insn[n++] =
- jit_x0_addi(clob2, TREG_ZERO, 7) |
- jit_x1_st_add(TREG_SP, clob2, 16);
- frag.insn[n++] =
- jit_x0_rotli(rb, rb, 56) |
- jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
- frag.insn[n++] =
- jit_x0_addi(clob2, clob2, -1) |
- jit_x1_bnezt(clob2, -1);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_addi(clob2, y1_br_reg, 0);
- } else if (load_store_size == 4) {
- frag.insn[n++] =
- jit_x0_addi(clob2, TREG_ZERO, 3) |
- jit_x1_st_add(TREG_SP, clob2, 16);
- frag.insn[n++] =
- jit_x0_rotli(rb, rb, 56) |
- jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
- frag.insn[n++] =
- jit_x0_addi(clob2, clob2, -1) |
- jit_x1_bnezt(clob2, -1);
-			/*
-			 * Same as the 8-byte case, but rotate another 4
-			 * bytes to recover rb after the 4-byte store.
-			 */
- frag.insn[n++] = jit_x0_rotli(rb, rb, 32) |
- jit_x1_addi(clob2, y1_br_reg, 0);
- } else { /* =2 */
- frag.insn[n++] =
- jit_x0_addi(clob2, rb, 0) |
- jit_x1_st_add(TREG_SP, clob2, 16);
- for (k = 0; k < 2; k++) {
- frag.insn[n++] =
- jit_x0_shrui(rb, rb, 8) |
- jit_x1_st1_add(clob1, rb,
- UA_FIXUP_ADDR_DELTA);
- }
- frag.insn[n++] =
- jit_x0_addi(rb, clob2, 0) |
- jit_x1_addi(clob2, y1_br_reg, 0);
- }
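-	/*
-	 * In the 8- and 4-byte cases above, the two-bundle loop
-	 * { rotli rb, rb, 56 ; st1_add } / { addi clob2, clob2, -1 ; bnezt }
-	 * stores one byte per iteration: the rotate brings the next byte
-	 * into the low position while st1 writes the current one, and
-	 * "bnezt clob2, -1" branches back one bundle until the counter set
-	 * up above runs out.
-	 */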
-
- if (bundle_2_enable)
- frag.insn[n++] = bundle_2;
-
- if (y1_lr) {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_mfspr(y1_lr_reg,
- SPR_EX_CONTEXT_0_0);
- }
- if (y1_br) {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
- clob2);
- }
- if (x1_add) {
- frag.insn[n++] =
- jit_x0_addi(ra, ra, x1_add_imm8) |
- jit_x1_ld_add(clob2, clob3, -8);
- } else {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ld_add(clob2, clob3, -8);
- }
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ld_add(clob1, clob3, -8);
- frag.insn[n++] = jit_x0_fnop() | jit_x1_ld(clob3, clob3);
- frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
-
- } else {
-		/*
-		 * Generic memory load cases.
-		 *
-		 * Allocate space for saving clob1, clob2 and clob3 on the
-		 * user's stack. Register clob3 points to where clob1 is
-		 * saved, followed by clob2 and clob3 from high to low memory.
-		 */
-
- frag.insn[n++] =
- jit_x0_addi(TREG_SP, TREG_SP, -32) |
- jit_x1_fnop();
- frag.insn[n++] =
- jit_x0_addi(clob3, TREG_SP, 16) |
- jit_x1_st_add(TREG_SP, clob3, 8);
- frag.insn[n++] =
- jit_x0_addi(clob2, ra, 0) |
- jit_x1_st_add(TREG_SP, clob2, 8);
-
- if (y1_br) {
- frag.insn[n++] =
- jit_x0_addi(clob1, y1_br_reg, 0) |
- jit_x1_st_add(TREG_SP, clob1, 16);
- } else {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_st_add(TREG_SP, clob1, 16);
- }
-
- if (bundle_2_enable)
- frag.insn[n++] = bundle_2;
-
- if (y1_lr) {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_mfspr(y1_lr_reg,
- SPR_EX_CONTEXT_0_0);
- }
-
- if (y1_br) {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
- clob1);
- }
-
- frag.insn[n++] =
- jit_x0_addi(clob1, clob2, 7) |
- jit_x1_ldna(rd, clob2);
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ldna(clob1, clob1);
- frag.insn[n++] =
- jit_x0_dblalign(rd, clob1, clob2) |
- jit_x1_ld_add(clob1, clob3, -8);
- if (x1_add) {
- frag.insn[n++] =
- jit_x0_addi(ra, ra, x1_add_imm8) |
- jit_x1_ld_add(clob2, clob3, -8);
- } else {
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ld_add(clob2, clob3, -8);
- }
-
- frag.insn[n++] =
- jit_x0_fnop() |
- jit_x1_ld(clob3, clob3);
-
- if (load_store_size == 4) {
- if (load_store_signed)
- frag.insn[n++] =
- jit_x0_bfexts(
- rd, rd,
- UA_FIXUP_BFEXT_START(4),
- UA_FIXUP_BFEXT_END(4)) |
- jit_x1_fnop();
- else
- frag.insn[n++] =
- jit_x0_bfextu(
- rd, rd,
- UA_FIXUP_BFEXT_START(4),
- UA_FIXUP_BFEXT_END(4)) |
- jit_x1_fnop();
- } else if (load_store_size == 2) {
- if (load_store_signed)
- frag.insn[n++] =
- jit_x0_bfexts(
- rd, rd,
- UA_FIXUP_BFEXT_START(2),
- UA_FIXUP_BFEXT_END(2)) |
- jit_x1_fnop();
- else
- frag.insn[n++] =
- jit_x0_bfextu(
- rd, rd,
- UA_FIXUP_BFEXT_START(2),
- UA_FIXUP_BFEXT_END(2)) |
- jit_x1_fnop();
- }
-
- frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
- }
-
- /* Max JIT bundle count is 14. */
- WARN_ON(n > 14);
-
- if (!unexpected) {
- int status = 0;
- int idx = (regs->pc >> 3) &
- ((1ULL << (PAGE_SHIFT - UNALIGN_JIT_SHIFT)) - 1);
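-		/*
-		 * The JIT page holds 2^(PAGE_SHIFT - UNALIGN_JIT_SHIFT)
-		 * fragment slots and acts as a direct-mapped cache: the
-		 * slot is selected by the low bits of the faulting bundle's
-		 * pc, so repeated faults at the same pc rewrite the same
-		 * slot.
-		 */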
-
- frag.pc = regs->pc;
- frag.bundle = bundle;
-
- if (unaligned_printk) {
- pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n",
- current->comm, current->pid,
- (unsigned long)frag.pc,
- (unsigned long)frag.bundle,
- (int)alias, (int)rd, (int)ra,
- (int)rb, (int)bundle_2_enable,
- (int)y1_lr, (int)y1_br, (int)x1_add);
-
- for (k = 0; k < n; k += 2)
- pr_info("[%d] %016llx %016llx\n",
- k, (unsigned long long)frag.insn[k],
- (unsigned long long)frag.insn[k+1]);
- }
-
-		/* Swap bundle byte order for big-endian systems. */
-#ifdef __BIG_ENDIAN
- frag.bundle = GX_INSN_BSWAP(frag.bundle);
- for (k = 0; k < n; k++)
- frag.insn[k] = GX_INSN_BSWAP(frag.insn[k]);
-#endif /* __BIG_ENDIAN */
-
- status = copy_to_user((void __user *)&jit_code_area[idx],
- &frag, sizeof(frag));
- if (status) {
-			/* Failed to copy the JIT into userspace; send SIGSEGV. */
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGSEGV;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (void __user *)&jit_code_area[idx];
-
- pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n",
- current->pid, current->comm,
- (unsigned long long)&jit_code_area[idx]);
-
- trace_unhandled_signal("segfault in unalign fixup",
- regs,
- (unsigned long)info.si_addr,
- SIGSEGV);
- force_sig_info(info.si_signo, &info, current);
- return;
- }
-
-
-		/* Do a cheap increment; it need not be perfectly accurate. */
- unaligned_fixup_count++;
- __flush_icache_range((unsigned long)&jit_code_area[idx],
- (unsigned long)&jit_code_area[idx] +
- sizeof(frag));
-
-		/* Set up SPR_EX_CONTEXT_0_0/1 for returning to the user program. */
- __insn_mtspr(SPR_EX_CONTEXT_0_0, regs->pc + 8);
- __insn_mtspr(SPR_EX_CONTEXT_0_1, PL_ICS_EX1(USER_PL, 0));
-
-		/* Point pc at the start of the new JIT fragment. */
- regs->pc = (unsigned long)&jit_code_area[idx].insn[0];
- /* Set ICS in SPR_EX_CONTEXT_K_1. */
- regs->ex1 = PL_ICS_EX1(USER_PL, 1);
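-		/*
-		 * With ICS (interrupt critical section) set, the JIT
-		 * fragment runs without being interrupted until its own
-		 * "iret", which consumes the EX_CONTEXT_0_0/1 values set
-		 * above and resumes at the bundle after the faulting one.
-		 */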
- }
-}
-
-
-/*
- * C function to generate the unalign data JIT. Called from the unalign
- * data interrupt handler.
- *
- * For a user-space fault, first check whether the unalign fixup is
- * disabled, ICS was set, or the sp register points to an unaligned
- * address; if so, generate a SIGBUS. Then map a page into user space as
- * the JIT area if it is not mapped yet, and generate the JIT code by
- * calling jit_bundle_gen(). After that, return back to the exception
- * handler.
- *
- * The exception handler will "iret" to the newly generated JIT code
- * after restoring the caller-saved registers. The JIT code then performs
- * another "iret" to resume the user's program.
- */
-
-void do_unaligned(struct pt_regs *regs, int vecnum)
-{
- tilegx_bundle_bits __user *pc;
- tilegx_bundle_bits bundle;
- struct thread_info *info = current_thread_info();
- int align_ctl;
-
-	/* Check the per-process alignment-control flags. */
- align_ctl = unaligned_fixup;
- switch (task_thread_info(current)->align_ctl) {
- case PR_UNALIGN_NOPRINT:
- align_ctl = 1;
- break;
- case PR_UNALIGN_SIGBUS:
- align_ctl = 0;
- break;
- }
-
-	/* Enable interrupts in order to access userspace. */
- local_irq_enable();
-
-	/*
-	 * If the fault came from kernel space, there are two choices:
-	 * (a) unaligned_fixup < 1: first call the get/put_user fixup
-	 *     to return -EFAULT. If there is no fixup, simply panic the kernel.
-	 * (b) unaligned_fixup >= 1: try to fix the unaligned access
-	 *     if it was triggered by the get_user/put_user() macros. Panic the
-	 *     kernel if it is not fixable.
-	 */
-
- if (EX1_PL(regs->ex1) != USER_PL) {
-
- if (align_ctl < 1) {
- unaligned_fixup_count++;
- /* If exception came from kernel, try fix it up. */
- if (fixup_exception(regs)) {
- if (unaligned_printk)
- pr_info("Unalign fixup: %d %llx @%llx\n",
- (int)unaligned_fixup,
- (unsigned long long)regs->ex1,
- (unsigned long long)regs->pc);
- } else {
- /* Not fixable. Go panic. */
- panic("Unalign exception in Kernel. pc=%lx",
- regs->pc);
- }
- } else {
- /*
- * Try to fix the exception. If we can't, panic the
- * kernel.
- */
- bundle = GX_INSN_BSWAP(
- *((tilegx_bundle_bits *)(regs->pc)));
- jit_bundle_gen(regs, bundle, align_ctl);
- }
- return;
- }
-
-	/*
-	 * The fault came from user space with ICS set, the stack is not
-	 * aligned, or fixups are disabled; in these cases trigger a SIGBUS.
-	 */
- if ((regs->sp & 0x7) || (regs->ex1) || (align_ctl < 0)) {
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_code = BUS_ADRALN;
- info.si_addr = (unsigned char __user *)0;
-
- if (unaligned_printk)
- pr_info("Unalign fixup: %d %llx @%llx\n",
- (int)unaligned_fixup,
- (unsigned long long)regs->ex1,
- (unsigned long long)regs->pc);
-
- unaligned_fixup_count++;
-
- trace_unhandled_signal("unaligned fixup trap", regs, 0, SIGBUS);
- force_sig_info(info.si_signo, &info, current);
- return;
- }
-
-
-	/* Read the bundle that caused the exception. */
- pc = (tilegx_bundle_bits __user *)(regs->pc);
- if (get_user(bundle, pc) != 0) {
-		/* We should rarely get here, since pc is a valid user address. */
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGSEGV;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (void __user *)pc;
-
- pr_err("Couldn't read instruction at %p trying to step\n", pc);
- trace_unhandled_signal("segfault in unalign fixup", regs,
- (unsigned long)info.si_addr, SIGSEGV);
- force_sig_info(info.si_signo, &info, current);
- return;
- }
-
- if (!info->unalign_jit_base) {
- void __user *user_page;
-
- /*
- * Allocate a page in userland.
- * For 64-bit processes we try to place the mapping far
- * from anything else that might be going on (specifically
- * 64 GB below the top of the user address space). If it
- * happens not to be possible to put it there, it's OK;
- * the kernel will choose another location and we'll
- * remember it for later.
- */
- if (is_compat_task())
- user_page = NULL;
- else
- user_page = (void __user *)(TASK_SIZE - (1UL << 36)) +
- (current->pid << PAGE_SHIFT);
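-		/*
-		 * (1UL << 36) is the 64 GB mentioned above; the pid-scaled
-		 * offset spreads the hint so different processes prefer
-		 * different addresses.
-		 */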
-
- user_page = (void __user *) vm_mmap(NULL,
- (unsigned long)user_page,
- PAGE_SIZE,
- PROT_EXEC | PROT_READ |
- PROT_WRITE,
-#ifdef CONFIG_HOMECACHE
- MAP_CACHE_HOME_TASK |
-#endif
- MAP_PRIVATE |
- MAP_ANONYMOUS,
- 0);
-
- if (IS_ERR((void __force *)user_page)) {
- pr_err("Out of kernel pages trying do_mmap\n");
- return;
- }
-
- /* Save the address in the thread_info struct */
- info->unalign_jit_base = user_page;
- if (unaligned_printk)
- pr_info("Unalign bundle: %d:%d, allocate page @%llx\n",
- raw_smp_processor_id(), current->pid,
- (unsigned long long)user_page);
- }
-
-	/* Generate the unalign fixup JIT. */
- jit_bundle_gen(regs, GX_INSN_BSWAP(bundle), align_ctl);
-}
-
-#endif /* __tilegx__ */
diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c
deleted file mode 100644
index 9f1e05e12255..000000000000
--- a/arch/tile/kernel/usb.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * Register the Tile-Gx USB interfaces as platform devices.
- *
- * The actual USB driver is just some glue (in
- * drivers/usb/host/[eo]hci-tilegx.c) which makes the registers available
- * to the standard kernel EHCI and OHCI drivers.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/usb/tilegx.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-
-static u64 ehci_dmamask = DMA_BIT_MASK(32);
-
-#define USB_HOST_DEF(unit, type, dmamask) \
- static struct \
- tilegx_usb_platform_data tilegx_usb_platform_data_ ## type ## \
- hci ## unit = { \
- .dev_index = unit, \
- }; \
- \
- static struct platform_device tilegx_usb_ ## type ## hci ## unit = { \
- .name = "tilegx-" #type "hci", \
- .id = unit, \
- .dev = { \
- .dma_mask = dmamask, \
- .coherent_dma_mask = DMA_BIT_MASK(32), \
- .platform_data = \
- &tilegx_usb_platform_data_ ## type ## hci ## \
- unit, \
- }, \
- };
-
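-/*
- * For example, USB_HOST_DEF(0, e, &ehci_dmamask) expands to a
- * "tilegx_usb_platform_data_ehci0" record plus a platform device
- * "tilegx_usb_ehci0" whose name is "tilegx-ehci" and id is 0.
- */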
-USB_HOST_DEF(0, e, &ehci_dmamask)
-USB_HOST_DEF(0, o, NULL)
-USB_HOST_DEF(1, e, &ehci_dmamask)
-USB_HOST_DEF(1, o, NULL)
-
-#undef USB_HOST_DEF
-
-static struct platform_device *tilegx_usb_devices[] __initdata = {
- &tilegx_usb_ehci0,
- &tilegx_usb_ehci1,
- &tilegx_usb_ohci0,
- &tilegx_usb_ohci1,
-};
-
-/** Add our set of possible USB devices. */
-static int __init tilegx_usb_init(void)
-{
- platform_add_devices(tilegx_usb_devices,
- ARRAY_SIZE(tilegx_usb_devices));
-
- return 0;
-}
-arch_initcall(tilegx_usb_init);
diff --git a/arch/tile/kernel/vdso.c b/arch/tile/kernel/vdso.c
deleted file mode 100644
index 5bc51d7dfdcb..000000000000
--- a/arch/tile/kernel/vdso.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/binfmts.h>
-#include <linux/compat.h>
-#include <linux/elf.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-
-#include <asm/vdso.h>
-#include <asm/mman.h>
-#include <asm/sections.h>
-
-#include <arch/sim.h>
-
-/* The alignment of the vDSO. */
-#define VDSO_ALIGNMENT PAGE_SIZE
-
-
-static unsigned int vdso_pages;
-static struct page **vdso_pagelist;
-
-#ifdef CONFIG_COMPAT
-static unsigned int vdso32_pages;
-static struct page **vdso32_pagelist;
-#endif
-static int vdso_ready;
-
-/*
- * The vdso data page.
- */
-static union {
- struct vdso_data data;
- u8 page[PAGE_SIZE];
-} vdso_data_store __page_aligned_data;
-
-struct vdso_data *vdso_data = &vdso_data_store.data;
-
-static unsigned int __read_mostly vdso_enabled = 1;
-
-static struct page **vdso_setup(void *vdso_kbase, unsigned int pages)
-{
- int i;
- struct page **pagelist;
-
- pagelist = kzalloc(sizeof(struct page *) * (pages + 1), GFP_KERNEL);
- BUG_ON(pagelist == NULL);
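-	/*
-	 * Fill pages-1 entries with the vDSO code pages, then append the
-	 * shared vdso data page, and NULL-terminate the list.
-	 */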
- for (i = 0; i < pages - 1; i++) {
- struct page *pg = virt_to_page(vdso_kbase + i*PAGE_SIZE);
- ClearPageReserved(pg);
- pagelist[i] = pg;
- }
- pagelist[pages - 1] = virt_to_page(vdso_data);
- pagelist[pages] = NULL;
-
- return pagelist;
-}
-
-static int __init vdso_init(void)
-{
- int data_pages = sizeof(vdso_data_store) >> PAGE_SHIFT;
-
- /*
- * We can disable vDSO support generally, but we need to retain
- * one page to support the two-bundle (16-byte) rt_sigreturn path.
- */
- if (!vdso_enabled) {
- size_t offset = (unsigned long)&__vdso_rt_sigreturn;
- static struct page *sigret_page;
- sigret_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- BUG_ON(sigret_page == NULL);
- vdso_pagelist = &sigret_page;
- vdso_pages = 1;
- BUG_ON(offset >= PAGE_SIZE);
- memcpy(page_address(sigret_page) + offset,
- vdso_start + offset, 16);
-#ifdef CONFIG_COMPAT
- vdso32_pages = vdso_pages;
- vdso32_pagelist = vdso_pagelist;
-#endif
- vdso_ready = 1;
- return 0;
- }
-
- vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
- vdso_pages += data_pages;
- vdso_pagelist = vdso_setup(vdso_start, vdso_pages);
-
-#ifdef CONFIG_COMPAT
- vdso32_pages = (vdso32_end - vdso32_start) >> PAGE_SHIFT;
- vdso32_pages += data_pages;
- vdso32_pagelist = vdso_setup(vdso32_start, vdso32_pages);
-#endif
-
- smp_wmb();
- vdso_ready = 1;
-
- return 0;
-}
-arch_initcall(vdso_init);
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
- if (vma->vm_mm && vma->vm_start == VDSO_BASE)
- return "[vdso]";
-#ifndef __tilegx__
- if (vma->vm_start == MEM_USER_INTRPT)
- return "[intrpt]";
-#endif
- return NULL;
-}
-
-int setup_vdso_pages(void)
-{
- struct page **pagelist;
- unsigned long pages;
- struct mm_struct *mm = current->mm;
- unsigned long vdso_base = 0;
- int retval = 0;
-
- if (!vdso_ready)
- return 0;
-
- mm->context.vdso_base = 0;
-
- pagelist = vdso_pagelist;
- pages = vdso_pages;
-#ifdef CONFIG_COMPAT
- if (is_compat_task()) {
- pagelist = vdso32_pagelist;
- pages = vdso32_pages;
- }
-#endif
-
-	/*
-	 * The vDSO had a problem and was disabled; just don't "enable" it
-	 * for this process.
-	 */
- if (pages == 0)
- return 0;
-
- vdso_base = get_unmapped_area(NULL, vdso_base,
- (pages << PAGE_SHIFT) +
- ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
- 0, 0);
- if (IS_ERR_VALUE(vdso_base)) {
- retval = vdso_base;
- return retval;
- }
-
- /* Add required alignment. */
- vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
-
- /*
- * Put vDSO base into mm struct. We need to do this before calling
- * install_special_mapping or the perf counter mmap tracking code
- * will fail to recognise it as a vDSO (since arch_vma_name fails).
- */
- mm->context.vdso_base = vdso_base;
-
-	/*
-	 * Our vma flags don't have VM_WRITE, so by default the process isn't
-	 * allowed to write to those pages.
-	 * gdb can break that with the ptrace interface, and thus trigger COW
-	 * on those pages, but it's then your responsibility never to do that
-	 * on the "data" page of the vDSO, or you'll stop getting kernel
-	 * updates and your nice userland gettimeofday will be totally dead.
-	 * It's fine to use ptrace for setting breakpoints in the vDSO code
-	 * pages, though.
-	 */
- retval = install_special_mapping(mm, vdso_base,
- pages << PAGE_SHIFT,
- VM_READ|VM_EXEC |
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
- pagelist);
- if (retval)
- mm->context.vdso_base = 0;
-
- return retval;
-}
-
-static __init int vdso_func(char *s)
-{
- return kstrtouint(s, 0, &vdso_enabled);
-}
-__setup("vdso=", vdso_func);
diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile
deleted file mode 100644
index b596a7396382..000000000000
--- a/arch/tile/kernel/vdso/Makefile
+++ /dev/null
@@ -1,117 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Symbols present in the vdso
-vdso-syms = rt_sigreturn gettimeofday
-
-# Files to link into the vdso
-obj-vdso = $(patsubst %, v%.o, $(vdso-syms))
-
-# Build rules
-targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
-obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
-
-# vdso32 is only for tilegx -m32 compat tasks.
-VDSO32-$(CONFIG_COMPAT) := y
-
-obj-y += vdso.o vdso-syms.o
-obj-$(VDSO32-y) += vdso32.o
-CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-
-# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
-CFLAGS_REMOVE_vdso.o = -pg
-CFLAGS_REMOVE_vdso32.o = -pg
-CFLAGS_REMOVE_vrt_sigreturn.o = -pg
-CFLAGS_REMOVE_vrt_sigreturn32.o = -pg
-CFLAGS_REMOVE_vgettimeofday.o = -pg
-CFLAGS_REMOVE_vgettimeofday32.o = -pg
-
-ifdef CONFIG_FEEDBACK_COLLECT
-# vDSO code runs in userspace, so don't collect feedback data.
-CFLAGS_REMOVE_vdso.o = -ffeedback-generate
-CFLAGS_REMOVE_vdso32.o = -ffeedback-generate
-CFLAGS_REMOVE_vrt_sigreturn.o = -ffeedback-generate
-CFLAGS_REMOVE_vrt_sigreturn32.o = -ffeedback-generate
-CFLAGS_REMOVE_vgettimeofday.o = -ffeedback-generate
-CFLAGS_REMOVE_vgettimeofday32.o = -ffeedback-generate
-endif
-
-# Disable gcov profiling for VDSO code
-GCOV_PROFILE := n
-
-# Force dependency
-$(obj)/vdso.o: $(obj)/vdso.so
-
-# link rule for the .so file, .lds has to be first
-SYSCFLAGS_vdso.so.dbg = $(c_flags)
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
- $(call if_changed,vdsold)
-
-# We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO. With ld -R we can then refer to
-# these symbols in the kernel code rather than hand-coded addresses.
-
-SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
-SYSCFLAGS_vdso-dummy.o = -r
-$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/vrt_sigreturn.o FORCE
- $(call if_changed,vdsold)
-
-LDFLAGS_vdso-syms.o := -r -R
-$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
- $(call if_changed,ld)
-
-# strip rule for the .so file
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg FORCE
- $(call if_changed,objcopy)
-
-# actual build commands
-# The DSO images are built using a special linker script
-# Add -lgcc so tilepro gets static muldi3 and lshrdi3 definitions.
-# Make sure only to export the intended __vdso_xxx symbol offsets.
-quiet_cmd_vdsold = VDSOLD $@
- cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \
- -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
- $(CROSS_COMPILE)objcopy \
- $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
-
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso.so: $(obj)/vdso.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso32.so: $(obj)/vdso32.so.dbg
- $(call cmd,vdso_install)
-
-vdso_install: vdso.so
-vdso32_install: vdso32.so
-
-
-KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
-KBUILD_AFLAGS_32 += -m32 -s
-KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
-KBUILD_CFLAGS_32 += -m32 -fPIC -shared
-
-obj-vdso32 = $(patsubst %, v%32.o, $(vdso-syms))
-
-targets += $(obj-vdso32) vdso32.so vdso32.so.dbg
-obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
-
-$(obj-vdso32:%=%): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
-$(obj-vdso32:%=%): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
-
-$(obj)/vgettimeofday32.o: $(obj)/vgettimeofday.c FORCE
- $(call if_changed_rule,cc_o_c)
-
-$(obj)/vrt_sigreturn32.o: $(obj)/vrt_sigreturn.S FORCE
- $(call if_changed,as_o_S)
-
-# Force dependency
-$(obj)/vdso32.o: $(obj)/vdso32.so
-
-SYSCFLAGS_vdso32.so.dbg = -m32 -shared -s -Wl,-soname=linux-vdso32.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
-$(obj)/vdso32.so.dbg: $(src)/vdso.lds $(obj-vdso32) FORCE
- $(call if_changed,vdsold)
diff --git a/arch/tile/kernel/vdso/vdso.S b/arch/tile/kernel/vdso/vdso.S
deleted file mode 100644
index 3467adb41630..000000000000
--- a/arch/tile/kernel/vdso/vdso.S
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/page.h>
-
- __PAGE_ALIGNED_DATA
-
- .global vdso_start, vdso_end
- .align PAGE_SIZE
-vdso_start:
- .incbin "arch/tile/kernel/vdso/vdso.so"
- .align PAGE_SIZE
-vdso_end:
-
- .previous
diff --git a/arch/tile/kernel/vdso/vdso.lds.S b/arch/tile/kernel/vdso/vdso.lds.S
deleted file mode 100644
index 731529f3f06f..000000000000
--- a/arch/tile/kernel/vdso/vdso.lds.S
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#define VDSO_VERSION_STRING LINUX_2.6
-
-
-OUTPUT_ARCH(tile)
-
-/* The ELF entry point can be used to set the AT_SYSINFO value. */
-ENTRY(__vdso_rt_sigreturn);
-
-
-SECTIONS
-{
- . = SIZEOF_HEADERS;
-
- .hash : { *(.hash) } :text
- .gnu.hash : { *(.gnu.hash) }
- .dynsym : { *(.dynsym) }
- .dynstr : { *(.dynstr) }
- .gnu.version : { *(.gnu.version) }
- .gnu.version_d : { *(.gnu.version_d) }
- .gnu.version_r : { *(.gnu.version_r) }
-
- .note : { *(.note.*) } :text :note
- .dynamic : { *(.dynamic) } :text :dynamic
-
- .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
- .eh_frame : { KEEP (*(.eh_frame)) } :text
-
- .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
-
- /*
- * This linker script is used both with -r and with -shared.
- * For the layouts to match, we need to skip more than enough
- * space for the dynamic symbol table et al. If this amount
- * is insufficient, ld -shared will barf. Just increase it here.
- */
- . = 0x1000;
- .text : { *(.text .text.*) } :text
-
- .data : {
- *(.got.plt) *(.got)
- *(.data .data.* .gnu.linkonce.d.*)
- *(.dynbss)
- *(.bss .bss.* .gnu.linkonce.b.*)
- }
-}
-
-
-/*
- * We must supply the ELF program headers explicitly to get just one
- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
- */
-PHDRS
-{
- text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
- dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
- note PT_NOTE FLAGS(4); /* PF_R */
- eh_frame_hdr PT_GNU_EH_FRAME;
-}
-
-
-/*
- * This controls what userland symbols we export from the vDSO.
- */
-VERSION
-{
- VDSO_VERSION_STRING {
- global:
- __vdso_rt_sigreturn;
- __vdso_gettimeofday;
- gettimeofday;
- __vdso_clock_gettime;
- clock_gettime;
- local:*;
- };
-}
diff --git a/arch/tile/kernel/vdso/vdso32.S b/arch/tile/kernel/vdso/vdso32.S
deleted file mode 100644
index 1d1ac3257e11..000000000000
--- a/arch/tile/kernel/vdso/vdso32.S
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2013 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/page.h>
-
- __PAGE_ALIGNED_DATA
-
- .global vdso32_start, vdso32_end
- .align PAGE_SIZE
-vdso32_start:
- .incbin "arch/tile/kernel/vdso/vdso32.so"
- .align PAGE_SIZE
-vdso32_end:
-
- .previous
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
deleted file mode 100644
index e63310c49742..000000000000
--- a/arch/tile/kernel/vdso/vgettimeofday.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#define VDSO_BUILD /* avoid some shift warnings for -m32 in <asm/page.h> */
-#include <linux/time.h>
-#include <asm/timex.h>
-#include <asm/unistd.h>
-#include <asm/vdso.h>
-
-#if CHIP_HAS_SPLIT_CYCLE()
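-/*
- * The 64-bit cycle counter is split across two 32-bit SPRs; re-read the
- * high half until it is stable, so the low half is known to be
- * consistent with it.
- */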
-static inline cycles_t get_cycles_inline(void)
-{
- unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
- unsigned int low = __insn_mfspr(SPR_CYCLE_LOW);
- unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH);
-
- while (unlikely(high != high2)) {
- low = __insn_mfspr(SPR_CYCLE_LOW);
- high = high2;
- high2 = __insn_mfspr(SPR_CYCLE_HIGH);
- }
-
- return (((cycles_t)high) << 32) | low;
-}
-#define get_cycles get_cycles_inline
-#endif
-
-struct syscall_return_value {
- long value;
- long error;
-};
-
-/*
- * Find out the vDSO data page address in the process address space.
- */
-inline unsigned long get_datapage(void)
-{
- unsigned long ret;
-
-	/* The vdso data page is located in the 2nd vDSO page. */
- asm volatile ("lnk %0" : "=r"(ret));
- ret &= ~(PAGE_SIZE - 1);
- ret += PAGE_SIZE;
-
- return ret;
-}
-
-static inline u64 vgetsns(struct vdso_data *vdso)
-{
- return ((get_cycles() - vdso->cycle_last) & vdso->mask) * vdso->mult;
-}
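-
-/*
- * vgetsns() returns "shifted nanoseconds": the cycle delta scaled by
- * vdso->mult, still left-shifted by vdso->shift. The callers below add
- * wall_time_snsec/monotonic_time_snsec (stored in the same scale) and
- * then shift right once to obtain nanoseconds.
- */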
-
-static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
-{
- unsigned count;
- u64 ns;
-
- do {
- count = raw_read_seqcount_begin(&vdso->tb_seq);
- ts->tv_sec = vdso->wall_time_sec;
- ns = vdso->wall_time_snsec;
- ns += vgetsns(vdso);
- ns >>= vdso->shift;
- } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
-
- ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
- ts->tv_nsec = ns;
-
- return 0;
-}
-
-static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
-{
- unsigned count;
- u64 ns;
-
- do {
- count = raw_read_seqcount_begin(&vdso->tb_seq);
- ts->tv_sec = vdso->monotonic_time_sec;
- ns = vdso->monotonic_time_snsec;
- ns += vgetsns(vdso);
- ns >>= vdso->shift;
- } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
-
- ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
- ts->tv_nsec = ns;
-
- return 0;
-}
-
-static inline int do_realtime_coarse(struct vdso_data *vdso,
- struct timespec *ts)
-{
- unsigned count;
-
- do {
- count = raw_read_seqcount_begin(&vdso->tb_seq);
- ts->tv_sec = vdso->wall_time_coarse_sec;
- ts->tv_nsec = vdso->wall_time_coarse_nsec;
- } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
-
- return 0;
-}
-
-static inline int do_monotonic_coarse(struct vdso_data *vdso,
- struct timespec *ts)
-{
- unsigned count;
-
- do {
- count = raw_read_seqcount_begin(&vdso->tb_seq);
- ts->tv_sec = vdso->monotonic_time_coarse_sec;
- ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
- } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
-
- return 0;
-}
-
-struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
- struct timezone *tz)
-{
- struct syscall_return_value ret = { 0, 0 };
- unsigned count;
- struct vdso_data *vdso = (struct vdso_data *)get_datapage();
-
- /* The use of the timezone is obsolete, normally tz is NULL. */
- if (unlikely(tz != NULL)) {
- do {
- count = raw_read_seqcount_begin(&vdso->tz_seq);
- tz->tz_minuteswest = vdso->tz_minuteswest;
- tz->tz_dsttime = vdso->tz_dsttime;
- } while (unlikely(read_seqcount_retry(&vdso->tz_seq, count)));
- }
-
- if (unlikely(tv == NULL))
- return ret;
-
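-	/*
-	 * do_realtime() fills tv as a timespec, leaving nanoseconds in
-	 * tv_usec; the division below scales that down to microseconds.
-	 */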
- do_realtime(vdso, (struct timespec *)tv);
- tv->tv_usec /= 1000;
-
- return ret;
-}
-
-int gettimeofday(struct timeval *tv, struct timezone *tz)
- __attribute__((weak, alias("__vdso_gettimeofday")));
-
-static struct syscall_return_value vdso_fallback_gettime(long clock,
- struct timespec *ts)
-{
- struct syscall_return_value ret;
- __asm__ __volatile__ (
- "swint1"
- : "=R00" (ret.value), "=R01" (ret.error)
- : "R10" (__NR_clock_gettime), "R00" (clock), "R01" (ts)
- : "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r11", "r12", "r13", "r14", "r15",
- "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
- "r24", "r25", "r26", "r27", "r28", "r29", "memory");
- return ret;
-}
-
-struct syscall_return_value __vdso_clock_gettime(clockid_t clock,
- struct timespec *ts)
-{
- struct vdso_data *vdso = (struct vdso_data *)get_datapage();
- struct syscall_return_value ret = { 0, 0 };
-
- switch (clock) {
- case CLOCK_REALTIME:
- do_realtime(vdso, ts);
- return ret;
- case CLOCK_MONOTONIC:
- do_monotonic(vdso, ts);
- return ret;
- case CLOCK_REALTIME_COARSE:
- do_realtime_coarse(vdso, ts);
- return ret;
- case CLOCK_MONOTONIC_COARSE:
- do_monotonic_coarse(vdso, ts);
- return ret;
- default:
- return vdso_fallback_gettime(clock, ts);
- }
-}
-
-int clock_gettime(clockid_t clock, struct timespec *ts)
- __attribute__((weak, alias("__vdso_clock_gettime")));
diff --git a/arch/tile/kernel/vdso/vrt_sigreturn.S b/arch/tile/kernel/vdso/vrt_sigreturn.S
deleted file mode 100644
index 6326caf4a039..000000000000
--- a/arch/tile/kernel/vdso/vrt_sigreturn.S
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#include <linux/linkage.h>
-#include <arch/abi.h>
-#include <asm/unistd.h>
-
-/*
- * Note that libc has a copy of this function that it uses to compare
- * against the PC when a stack backtrace ends, so if this code is
- * changed, the libc implementation(s) should also be updated.
- */
-ENTRY(__vdso_rt_sigreturn)
- moveli TREG_SYSCALL_NR_NAME, __NR_rt_sigreturn
- swint1
- /* We don't use ENDPROC to avoid tagging this symbol as FUNC,
- * which confuses the perf tool.
- */
- END(__vdso_rt_sigreturn)
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
deleted file mode 100644
index 3558d981e336..000000000000
--- a/arch/tile/kernel/vmlinux.lds.S
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <asm-generic/vmlinux.lds.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-#include <asm/thread_info.h>
-#include <hv/hypervisor.h>
-
-/* Text loads starting from the supervisor interrupt vector address. */
-#define TEXT_OFFSET MEM_SV_START
-
-OUTPUT_ARCH(tile)
-ENTRY(_start)
-jiffies = jiffies_64;
-
-PHDRS
-{
- intrpt PT_LOAD ;
- text PT_LOAD ;
- data PT_LOAD ;
-}
-SECTIONS
-{
- /* Text is loaded with a different VA than data; start with text. */
- #undef LOAD_OFFSET
- #define LOAD_OFFSET TEXT_OFFSET
-
- /* Interrupt vectors */
- .intrpt (LOAD_OFFSET) : AT ( 0 ) /* put at the start of physical memory */
- {
- _text = .;
- *(.intrpt)
- } :intrpt =0
-
- /* Hypervisor call vectors */
- . = ALIGN(0x10000);
- .hvglue : AT (ADDR(.hvglue) - LOAD_OFFSET) {
- *(.hvglue)
- } :NONE
-
- /* Now the real code */
- . = ALIGN(0x20000);
- _stext = .;
- .text : AT (ADDR(.text) - LOAD_OFFSET) {
- HEAD_TEXT
- SCHED_TEXT
- CPUIDLE_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- IRQENTRY_TEXT
- SOFTIRQENTRY_TEXT
- __fix_text_end = .; /* tile-cpack won't rearrange before this */
- ALIGN_FUNCTION();
- *(.hottext*)
- TEXT_TEXT
- *(.text.*)
- *(.coldtext*)
- *(.fixup)
- *(.gnu.warning)
- } :text =0
- _etext = .;
-
- /* "Init" is divided into two areas with very different virtual addresses. */
- INIT_TEXT_SECTION(PAGE_SIZE)
-
- /*
- * Some things, like the __jump_table, may contain symbol references
- * to __exit text, so include such text in the final image if so.
- * In that case we also override the _einittext from INIT_TEXT_SECTION.
- */
-#ifdef CONFIG_JUMP_LABEL
- .exit.text : {
- EXIT_TEXT
- _einittext = .;
- }
-#endif
-
- /* Now we skip back to PAGE_OFFSET for the data. */
- . = (. - TEXT_OFFSET + PAGE_OFFSET);
- #undef LOAD_OFFSET
- #define LOAD_OFFSET PAGE_OFFSET
-
- . = ALIGN(PAGE_SIZE);
- __init_begin = .;
- INIT_DATA_SECTION(16) :data =0
- PERCPU_SECTION(L2_CACHE_BYTES)
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
-
- _sdata = .; /* Start of data section */
- RO_DATA_SECTION(PAGE_SIZE)
- RW_DATA_SECTION(L2_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
- _edata = .;
-
- EXCEPTION_TABLE(L2_CACHE_BYTES)
- NOTES
-
-
- BSS_SECTION(8, PAGE_SIZE, 1)
- _end = . ;
-
- STABS_DEBUG
- DWARF_DEBUG
-
- DISCARDS
-}