Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--  arch/riscv/kernel/acpi_numa.c              |  2 +-
-rw-r--r--  arch/riscv/kernel/patch.c                  |  4 ++++
-rw-r--r--  arch/riscv/kernel/sys_hwprobe.c            | 11 ++++++-----
-rw-r--r--  arch/riscv/kernel/traps.c                  |  4 ++--
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c       |  6 +++---
-rw-r--r--  arch/riscv/kernel/unaligned_access_speed.c | 12 ++++++------
-rw-r--r--  arch/riscv/kernel/vendor_extensions.c      |  2 +-
7 files changed, 23 insertions(+), 18 deletions(-)
diff --git a/arch/riscv/kernel/acpi_numa.c b/arch/riscv/kernel/acpi_numa.c
index 0231482d6946..ff95aeebee3e 100644
--- a/arch/riscv/kernel/acpi_numa.c
+++ b/arch/riscv/kernel/acpi_numa.c
@@ -28,7 +28,7 @@
 
 #include <asm/numa.h>
 
-static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
+static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
 
 int __init acpi_numa_get_nid(unsigned int cpu)
 {
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 69e5796fc51f..34ef522f07a8 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -205,6 +205,8 @@ int patch_text_set_nosync(void *addr, u8 c, size_t len)
 	int ret;
 
 	ret = patch_insn_set(addr, c, len);
+	if (!ret)
+		flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);
 
 	return ret;
 }
@@ -239,6 +241,8 @@ int patch_text_nosync(void *addr, const void *insns, size_t len)
 	int ret;
 
 	ret = patch_insn_write(addr, insns, len);
+	if (!ret)
+		flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);
 
 	return ret;
 }
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 8d1b5c35d2a7..cea0ca2bf2a2 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -178,13 +178,13 @@ static u64 hwprobe_misaligned(const struct cpumask *cpus)
 			perf = this_perf;
 
 		if (perf != this_perf) {
-			perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
 			break;
 		}
 	}
 
 	if (perf == -1ULL)
-		return RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
 
 	return perf;
 }
@@ -192,12 +192,12 @@ static u64 hwprobe_misaligned(const struct cpumask *cpus)
 static u64 hwprobe_misaligned(const struct cpumask *cpus)
 {
 	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
-		return RISCV_HWPROBE_MISALIGNED_FAST;
+		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;
 
 	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
-		return RISCV_HWPROBE_MISALIGNED_EMULATED;
+		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;
 
-	return RISCV_HWPROBE_MISALIGNED_SLOW;
+	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
 }
 #endif
 
@@ -225,6 +225,7 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
 		break;
 
 	case RISCV_HWPROBE_KEY_CPUPERF_0:
+	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
 		pair->value = hwprobe_misaligned(cpus);
 		break;
 
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 05a16b1f0aee..51ebfd23e007 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -319,6 +319,7 @@ void do_trap_ecall_u(struct pt_regs *regs)
 
 		regs->epc += 4;
 		regs->orig_a0 = regs->a0;
+		regs->a0 = -ENOSYS;
 
 		riscv_v_vstate_discard(regs);
 
@@ -328,8 +329,7 @@
 
 		if (syscall >= 0 && syscall < NR_syscalls)
 			syscall_handler(regs, syscall);
-		else if (syscall != -1)
-			regs->a0 = -ENOSYS;
+
 		/*
 		 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
 		 * so the maximum stack offset is 1k bytes (10 bits).
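A note on the acpi_numa.c hunk above: { NUMA_NO_NODE } only covers element 0, the remaining NR_CPUS - 1 entries are zero-initialized, and 0 is a valid node ID, so unprobed CPUs silently appeared to live on node 0. The GNU C range designator fills the whole array with the sentinel. A minimal userspace sketch of the difference (the NR_CPUS value and array names here are illustrative, not the kernel's):

#include <stdio.h>

#define NR_CPUS      4   /* illustrative; the kernel's NR_CPUS is config-dependent */
#define NUMA_NO_NODE (-1)

/* Only element 0 is set explicitly; elements 1..3 default to 0,
 * which is a valid node ID, so the "unset" state is lost. */
static int broken_map[NR_CPUS] = { NUMA_NO_NODE };

/* GNU C range designator: every element starts out as NUMA_NO_NODE. */
static int fixed_map[NR_CPUS] = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };

int main(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		printf("cpu%d: broken=%2d fixed=%2d\n", i, broken_map[i], fixed_map[i]);
	return 0;
}

For the other hunks above: patch.c makes patch_text_set_nosync() and patch_text_nosync() flush the patched range from the instruction cache after a successful write, so a hart cannot keep executing stale instructions, and traps.c preloads a0 with -ENOSYS before the syscall table lookup, so any unhandled syscall number, including -1, now returns -ENOSYS instead of whatever the register happened to hold.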
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index b62d5a2f4541..192cd5603e95 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -338,7 +338,7 @@ int handle_misaligned_load(struct pt_regs *regs)
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
 
 #ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
-	*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;
+	*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;
 #endif
 
 	if (!unaligned_enabled)
@@ -532,13 +532,13 @@ static bool check_unaligned_access_emulated(int cpu)
 	unsigned long tmp_var, tmp_val;
 	bool misaligned_emu_detected;
 
-	*mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+	*mas_ptr = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
 
 	__asm__ __volatile__ (
 		"       "REG_L" %[tmp], 1(%[ptr])\n"
 		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
 
-	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
+	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED);
 	/*
 	 * If unaligned_ctl is already set, this means that we detected that all
 	 * CPUS uses emulated misaligned access at boot time. If that changed
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index a9a6bcb02acf..160628a2116d 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -34,9 +34,9 @@ static int check_unaligned_access(void *param)
 	struct page *page = param;
 	void *dst;
 	void *src;
-	long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+	long speed = RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
 
-	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
 		return 0;
 
 	/* Make an unaligned destination buffer. */
@@ -95,14 +95,14 @@ static int check_unaligned_access(void *param)
 	}
 
 	if (word_cycles < byte_cycles)
-		speed = RISCV_HWPROBE_MISALIGNED_FAST;
+		speed = RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;
 
 	ratio = div_u64((byte_cycles * 100), word_cycles);
 	pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
 		cpu,
 		ratio / 100,
 		ratio % 100,
-		(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
+		(speed == RISCV_HWPROBE_MISALIGNED_SCALAR_FAST) ? "fast" : "slow");
 
 	per_cpu(misaligned_access_speed, cpu) = speed;
 
@@ -110,7 +110,7 @@ static int check_unaligned_access(void *param)
 	 * Set the value of fast_misaligned_access of a CPU. These operations
 	 * are atomic to avoid race conditions.
 	 */
-	if (speed == RISCV_HWPROBE_MISALIGNED_FAST)
+	if (speed == RISCV_HWPROBE_MISALIGNED_SCALAR_FAST)
 		cpumask_set_cpu(cpu, &fast_misaligned_access);
 	else
 		cpumask_clear_cpu(cpu, &fast_misaligned_access);
@@ -188,7 +188,7 @@ static int riscv_online_cpu(unsigned int cpu)
 	static struct page *buf;
 
 	/* We are already set since the last check */
-	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
 		goto exit;
 
 	buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
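The two files above are where the scalar misaligned-access ratings come from: traps_misaligned.c tags a CPU as EMULATED the moment the kernel has to fix up a misaligned load in the trap handler, and unaligned_access_speed.c times word versus byte copies to classify a CPU as FAST or SLOW. Userspace reads the result back through the riscv_hwprobe(2) syscall; a minimal sketch, assuming uapi headers new enough to carry the RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF key added by this series (on older headers the legacy RISCV_HWPROBE_KEY_CPUPERF_0 key returns the same value):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>	/* riscv uapi header; needs Linux 6.11-era headers */

int main(void)
{
	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF,
	};

	/* cpusetsize 0 / cpus NULL asks about all online CPUs; flags must be 0. */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0) {
		perror("riscv_hwprobe");
		return 1;
	}

	switch (pair.value) {
	case RISCV_HWPROBE_MISALIGNED_SCALAR_FAST:
		puts("scalar misaligned accesses: fast");
		break;
	case RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW:
		puts("scalar misaligned accesses: slow");
		break;
	case RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED:
		puts("scalar misaligned accesses: emulated by the kernel");
		break;
	default:
		puts("scalar misaligned accesses: unknown/unsupported");
	}
	return 0;
}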
diff --git a/arch/riscv/kernel/vendor_extensions.c b/arch/riscv/kernel/vendor_extensions.c
index b6c1e7b5d34b..a8126d118341 100644
--- a/arch/riscv/kernel/vendor_extensions.c
+++ b/arch/riscv/kernel/vendor_extensions.c
@@ -38,7 +38,7 @@ bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsig
 	#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES
 	case ANDES_VENDOR_ID:
 		bmap = &riscv_isa_vendor_ext_list_andes.all_harts_isa_bitmap;
-		cpu_bmap = &riscv_isa_vendor_ext_list_andes.per_hart_isa_bitmap[cpu];
+		cpu_bmap = riscv_isa_vendor_ext_list_andes.per_hart_isa_bitmap;
 		break;
 	#endif
 	default:
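The vendor_extensions.c hunk fixes a double indexing: as I read the surrounding function, the pointer picked in this switch is indexed by cpu again further down (bmap = &cpu_bmap[cpu]), so pre-indexing it here made the eventual access land at entry 2 * cpu, out of bounds for any cpu > 0. A standalone sketch of the pattern, with names borrowed from the diff and the struct layout simplified:

#include <stdio.h>

#define NR_HARTS 4

struct isainfo { unsigned long isa[2]; };	/* stand-in for the kernel's bitmap */

static struct isainfo per_hart_isa_bitmap[NR_HARTS];

int main(void)
{
	int cpu = 2;

	/* Old code: the base pointer is already offset by cpu... */
	struct isainfo *buggy = &per_hart_isa_bitmap[cpu];
	/* Fixed code: the base pointer is the array itself. */
	struct isainfo *fixed = per_hart_isa_bitmap;

	/* ...so indexing by cpu a second time lands at entry 2 * cpu. */
	printf("buggy: index %td (out of bounds for cpu > 1)\n",
	       &buggy[cpu] - per_hart_isa_bitmap);
	printf("fixed: index %td\n", &fixed[cpu] - per_hart_isa_bitmap);
	return 0;
}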