Some current CPUs based on T-Head cores implement memory types quite
differently from what the Svpbmt spec describes, even going so far as
to use PTE bits marked as reserved.
Add the T-Head vendor id and the necessary errata code to replace the
affected instructions.
Signed-off-by: Heiko Stuebner <heiko@...>
arch/riscv/Kconfig.erratas | 19 ++++++
arch/riscv/errata/Makefile | 1 +
arch/riscv/errata/thead/Makefile | 1 +
arch/riscv/errata/thead/errata.c | 82 ++++++++++++++++++++++++++
arch/riscv/include/asm/alternative.h | 5 ++
arch/riscv/include/asm/errata_list.h | 15 +++--
arch/riscv/include/asm/pgtable-64.h | 5 ++
arch/riscv/include/asm/vendorid_list.h | 1 +
arch/riscv/kernel/alternative.c | 14 +++++
arch/riscv/mm/init.c | 2 +
10 files changed, 141 insertions(+), 4 deletions(-)
create mode 100644 arch/riscv/errata/thead/Makefile
create mode 100644 arch/riscv/errata/thead/errata.c
@@ -31,4 +31,23 @@ config ERRATA_SIFIVE_CIP_1200
If you don't know what to do here, say "Y".
+config ERRATA_THEAD
+ bool "T-HEAD errata"
+ help
+ All T-HEAD errata Kconfig options depend on this option. Disabling
+ it will disable all T-HEAD errata. Please say "Y" here if your
+ platform uses T-HEAD CPU cores.
+
+ Otherwise, please say "N" here to avoid unnecessary overhead.
+
+config ERRATA_THEAD_MT
+ bool "Apply T-Head memory type errata"
+ depends on ERRATA_THEAD && 64BIT
+ default y
+ help
+ This will apply the memory type errata to handle the non-standard
+ memory type bits in page table entries on T-Head SoCs.
+
+ If you don't know what to do here, say "Y".
+
endmenu
@@ -1 +1,2 @@
obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
+obj-$(CONFIG_ERRATA_THEAD) += thead/
@@ -0,0 +1 @@
+obj-y += errata.o
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <asm/alternative.h>
+#include <asm/cacheflush.h>
+#include <asm/errata_list.h>
+#include <asm/patch.h>
+#include <asm/vendorid_list.h>
+
+struct errata_info {
+ char name[ERRATA_STRING_LENGTH_MAX];
+ bool (*check_func)(unsigned long arch_id, unsigned long impid);
+ unsigned int stage;
+};
+
+static bool errata_mt_check_func(unsigned long arch_id, unsigned long impid)
+{
+ if (arch_id != 0 || impid != 0)
+ return false;
+ return true;
+}
+
+static const struct errata_info errata_list[ERRATA_THEAD_NUMBER] = {
+ {
+ .name = "memory-types",
+ .stage = RISCV_ALTERNATIVES_VM,
+ .check_func = errata_mt_check_func
+ },
+};
+
+static u32 thead_errata_probe(unsigned int stage, unsigned long archid, unsigned long impid)
+{
+ const struct errata_info *info;
+ u32 cpu_req_errata = 0;
+ int idx;
+
+ for (idx = 0; idx < ERRATA_THEAD_NUMBER; idx++) {
+ info = &errata_list[idx];
+
+ if ((stage == RISCV_ALTERNATIVES_MODULE ||
+ info->stage == stage) && info->check_func(archid, impid))
+ cpu_req_errata |= (1U << idx);
+ }
+
+ return cpu_req_errata;
+}
+
+void __init_or_module thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage)
+{
+ struct alt_entry *alt;
+ u32 cpu_req_errata = thead_errata_probe(stage, archid, impid);
+ u32 tmp;
+
+ for (alt = begin; alt < end; alt++) {
+ if (alt->vendor_id != THEAD_VENDOR_ID)
+ continue;
+ if (alt->errata_id >= ERRATA_THEAD_NUMBER)
+ continue;
+
+ tmp = (1U << alt->errata_id);
+ if (cpu_req_errata & tmp) {
+ /* On vm-alternatives the MMU isn't running yet, so patch via the physical address */
+ if (stage == RISCV_ALTERNATIVES_VM)
+ memcpy((void *)__pa_symbol(alt->old_ptr),
+ (void *)__pa_symbol(alt->alt_ptr), alt->alt_len);
+ else
+ patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
+ }
+ }
+
+ if (stage == RISCV_ALTERNATIVES_VM)
+ local_flush_icache_all();
+}
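As a hedged sketch (not part of the patch): on an affected core, an
ALT_SVPBMT() site from the errata_list.h hunk below would be rewritten
by thead_errata_patch_func() roughly as follows; the a0 register and
the IO type are chosen purely for illustration:
  default (unpatched):       li   a0, 0
                             nop
  patched for T-Head, IO:    li   a0, 16        # _PAGE_IO_THEAD >> 59
                             slli a0, a0, 59    # ALT_SVPBMT_SHIFT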
@@ -20,9 +20,11 @@
#define RISCV_ALTERNATIVES_BOOT 0
#define RISCV_ALTERNATIVES_MODULE 1
#define RISCV_ALTERNATIVES_EARLY 2
+#define RISCV_ALTERNATIVES_VM 3
void __init apply_boot_alternatives(void);
void __init apply_early_alternatives(void);
+void __init apply_vm_alternatives(void);
void apply_module_alternatives(void *start, size_t length);
struct alt_entry {
@@ -41,6 +43,9 @@ struct errata_checkfunc_id {
void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage);
+void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage);
void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned int stage);
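For orientation (the defines are from the hunk above; which function
applies which stage is my reading of the series, so treat it as a
sketch):
  RISCV_ALTERNATIVES_BOOT    0   /* apply_boot_alternatives()          */
  RISCV_ALTERNATIVES_MODULE  1   /* apply_module_alternatives()        */
  RISCV_ALTERNATIVES_EARLY   2   /* apply_early_alternatives()         */
  RISCV_ALTERNATIVES_VM      3   /* apply_vm_alternatives(), new here;
                                    called from setup_vm() before the
                                    MMU is on (see the mm/init.c hunk)  */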
@@ -14,6 +14,11 @@
#define ERRATA_SIFIVE_NUMBER 2
#endif
+#ifdef CONFIG_ERRATA_THEAD
+#define ERRATA_THEAD_MT 0
+#define ERRATA_THEAD_NUMBER 1
+#endif
+
#define CPUFEATURE_SVPBMT 0
#define CPUFEATURE_NUMBER 1
@@ -42,10 +47,12 @@ asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
* in the default case.
*/
#define ALT_SVPBMT_SHIFT 59
-#define ALT_SVPBMT(_val, prot) \
-asm(ALTERNATIVE("li %0, 0\t\nnop", "li %0, %1\t\nslli %0,%0,%2", 0, \
- CPUFEATURE_SVPBMT, CONFIG_64BIT) \
- : "=r"(_val) : "I"( prot##_SVPBMT >> ALT_SVPBMT_SHIFT), "I"(ALT_SVPBMT_SHIFT))
+#define ALT_SVPBMT(_val, prot) \
+asm(ALTERNATIVE_2("li %0, 0\t\nnop", \
+ "li %0, %1\t\nslli %0,%0,%3", 0, CPUFEATURE_SVPBMT, CONFIG_64BIT, \
+ "li %0, %2\t\nslli %0,%0,%3", THEAD_VENDOR_ID, ERRATA_THEAD_MT, CONFIG_ERRATA_THEAD_MT) \
+ : "=r"(_val) : "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT), \
+ "I"(prot##_THEAD >> ALT_SVPBMT_SHIFT), "I"(ALT_SVPBMT_SHIFT))
#endif /* __ASSEMBLY__ */
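A short note on the asm constraints (sketched arithmetic, not part of
the patch): the "I" constraint requires the memory-type values to fit a
12-bit immediate once shifted down by ALT_SVPBMT_SHIFT, which holds for
the T-Head defines added in the pgtable-64.h hunk below:
  _PAGE_PMA_THEAD    >> 59  =  0xa
  _PAGE_IO_THEAD     >> 59  =  0x10
  _PAGE_MTMASK_THEAD >> 59  =  0x1f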
@@ -44,6 +44,11 @@ typedef struct {
#define _PAGE_IO_SVPBMT (1UL << 62)
#define _PAGE_MTMASK_SVPBMT (_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT)
+#define _PAGE_PMA_THEAD 0x5000000000000000UL
+#define _PAGE_NOCACHE_THEAD 0UL
+#define _PAGE_IO_THEAD (1UL << 63)
+#define _PAGE_MTMASK_THEAD 0xf800000000000000UL
+
static inline u64 riscv_page_mtmask(void)
{
u64 val;
@@ -6,5 +6,6 @@
#define ASM_VENDOR_LIST_H
#define SIFIVE_VENDOR_ID 0x489
+#define THEAD_VENDOR_ID 0x5b7
#endif
@@ -47,6 +47,11 @@ static void __init init_alternative(void)
case SIFIVE_VENDOR_ID:
vendor_patch_func = sifive_errata_patch_func;
break;
+#endif
+#ifdef CONFIG_ERRATA_THEAD
+ case THEAD_VENDOR_ID:
+ vendor_patch_func = thead_errata_patch_func;
+ break;
#endif
default:
vendor_patch_func = NULL;
@@ -93,6 +98,15 @@ void __init apply_early_alternatives(void)
RISCV_ALTERNATIVES_EARLY);
}
+void __init apply_vm_alternatives(void)
+{
+ init_alternative();
+
+ _apply_alternatives((struct alt_entry *)__alt_start,
+ (struct alt_entry *)__alt_end,
+ RISCV_ALTERNATIVES_VM);
+}
+
#ifdef CONFIG_MODULES
void apply_module_alternatives(void *start, size_t length)
{
@@ -644,6 +644,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif
+ apply_vm_alternatives();
+
setup_protection_map();
pt_ops.alloc_pte = alloc_pte_early;
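A closing note on ordering (my reading of the resulting flow, assuming
this sits on top of the svpbmt series):
  setup_vm(dtb_pa)
    -> apply_vm_alternatives()    /* patches ALT_SVPBMT sites while the
                                     MMU is still off, hence the
                                     __pa_symbol() path in
                                     thead_errata_patch_func()          */
    -> setup_protection_map()     /* protection_map now carries the
                                     T-Head memory-type bits            */
    -> ... early page table setup ...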