diff --git a/sys/riscv/include/asm.h b/sys/riscv/include/asm.h index 4b7ab0765272..c119e299a0a5 100644 --- a/sys/riscv/include/asm.h +++ b/sys/riscv/include/asm.h @@ -69,4 +69,9 @@ li tmp, SSTATUS_SUM; \ csrc sstatus, tmp +#define SBI_CALL(ext, func) \ + li a7, ext; \ + li a6, func; \ + ecall + #endif /* _MACHINE_ASM_H_ */ diff --git a/sys/riscv/include/cpu.h b/sys/riscv/include/cpu.h index 0c33adb2abcd..a204b21a4a74 100644 --- a/sys/riscv/include/cpu.h +++ b/sys/riscv/include/cpu.h @@ -35,9 +35,11 @@ #ifndef _MACHINE_CPU_H_ #define _MACHINE_CPU_H_ +#ifndef LOCORE #include #include #include +#endif #define TRAPF_PC(tfp) ((tfp)->tf_sepc) #define TRAPF_USERMODE(tfp) (((tfp)->tf_sstatus & SSTATUS_SPP) == 0) @@ -88,6 +90,7 @@ #define MMU_SV57 0x4 /* 5-level paging */ #ifdef _KERNEL +#ifndef LOCORE extern char btext[]; extern char etext[]; @@ -105,6 +108,7 @@ get_cyclecount(void) return (rdcycle()); } -#endif +#endif /* !LOCORE */ +#endif /* _KERNEL */ #endif /* !_MACHINE_CPU_H_ */ diff --git a/sys/riscv/include/sbi.h b/sys/riscv/include/sbi.h index c8093238e268..7b103b2e0dcf 100644 --- a/sys/riscv/include/sbi.h +++ b/sys/riscv/include/sbi.h @@ -123,6 +123,8 @@ #define SBI_REMOTE_SFENCE_VMA_ASID 7 #define SBI_SHUTDOWN 8 +#ifndef LOCORE + #define SBI_CALL0(e, f) SBI_CALL5(e, f, 0, 0, 0, 0, 0) #define SBI_CALL1(e, f, p1) SBI_CALL5(e, f, p1, 0, 0, 0, 0) #define SBI_CALL2(e, f, p1, p2) SBI_CALL5(e, f, p1, p2, 0, 0, 0) @@ -242,4 +244,5 @@ sbi_console_getchar(void) void sbi_print_version(void); void sbi_init(void); +#endif /* !LOCORE */ #endif /* !_MACHINE_SBI_H_ */ diff --git a/sys/riscv/riscv/identcpu.c b/sys/riscv/riscv/identcpu.c index e02907092b56..54e008122eab 100644 --- a/sys/riscv/riscv/identcpu.c +++ b/sys/riscv/riscv/identcpu.c @@ -470,6 +470,18 @@ handle_thead_quirks(u_int cpu, struct cpu_desc *desc) if (cpu != 0) return; + /* + * For now, it is assumed that T-HEAD CPUs have both marchid and mimpid + * values of zero (although we leave this unchecked).
It is true in + * practice for the early generations of this hardware (C906, C910, + * C920). In the future, the identity checks may need to become more + * granular, but until then all known T-HEAD quirks are applied + * indiscriminately. + * + * Note: any changes in this function relating to has_errata_thead_pbmt + * may need to be applied to get_pte_fixup_bits (in locore.S) as well. + */ + has_errata_thead_pbmt = true; thead_setup_cache(); } diff --git a/sys/riscv/riscv/locore.S b/sys/riscv/riscv/locore.S index 5c0ade6e66ca..305ed8d79f10 100644 --- a/sys/riscv/riscv/locore.S +++ b/sys/riscv/riscv/locore.S @@ -42,9 +42,11 @@ #include "assym.inc" #include +#include #include -#include #include +#include +#include .globl kernbase .set kernbase, KERNBASE @@ -141,12 +143,16 @@ pagetables: /* Get the kernel's load address (kernstart) in s9 */ jal get_physmem + /* Get PTE attribute bits in s8 */ + jal get_pte_fixup_bits + /* Construct 1GB Identity Map (1:1 PA->VA) */ lla s1, bootstrap_pt_l1 srli s2, s9, L1_SHIFT /* kernstart >> L1_SHIFT */ andi a5, s2, Ln_ADDR_MASK /* & Ln_ADDR_MASK */ li t4, (PTE_KERN) + or t4, t4, s8 /* t4 |= pte bits */ slli s2, s2, PTE_PPN2_S /* (s2 << PTE_PPN2_S) */ or t6, t4, s2 @@ -182,6 +188,7 @@ pagetables: li t2, Ln_ENTRIES /* Build 512 entries */ add t3, t4, t2 li t0, (PTE_KERN | PTE_X) + or t0, t0, s8 /* t0 |= pte bits */ 1: slli t2, t4, PTE_PPN1_S /* << PTE_PPN1_S */ or t5, t0, t2 @@ -273,6 +280,28 @@ get_physmem: sub s9, t2, t1 /* s9 = physmem base */ ret +/* + * T-HEAD CPUs implement an alternate scheme for PTE attributes that is + * incompatible with the RISC-V PTE specification (see the definitions in + * pte.h). Worse, it defines a non-zero value for "main" memory, and this must + * be set in order to proceed with our new page tables. + * + * Therefore, we are forced to check the CPU identity here, which is both + * inconvenient and fragile. + * + * Return the required attribute bits in s8. For sane implementations this is + * zero.
+ */ +get_pte_fixup_bits: + mv s8, zero + SBI_CALL(SBI_EXT_ID_BASE, SBI_BASE_GET_MVENDORID) + li t0, MVENDORID_THEAD + xor t0, t0, a1 + bnez t0, 1f /* branch if a1 != t0 */ + li s8, PTE_THEAD_MA_NONE +1: + ret + .align 4 initstack: .space (PAGE_SIZE * KSTACK_PAGES)