aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Makefile.am5
-rw-r--r--Makefrag.am2
-rw-r--r--configure.ac10
-rw-r--r--riscv/Makefrag.am82
-rw-r--r--riscv/README80
-rw-r--r--riscv/configfrag.ac91
-rw-r--r--riscv/include/asm/Kbuild11
-rw-r--r--riscv/include/asm/acenv.h11
-rw-r--r--riscv/include/asm/acpi.h90
-rw-r--r--riscv/include/asm/alternative-macros.h166
-rw-r--r--riscv/include/asm/alternative.h70
-rw-r--r--riscv/include/asm/asm-extable.h71
-rw-r--r--riscv/include/asm/asm-offsets.h1
-rw-r--r--riscv/include/asm/asm-prototypes.h32
-rw-r--r--riscv/include/asm/asm.h188
-rw-r--r--riscv/include/asm/asm.h~188
-rw-r--r--riscv/include/asm/assembler.h82
-rw-r--r--riscv/include/asm/atomic.h366
-rw-r--r--riscv/include/asm/barrier.h78
-rw-r--r--riscv/include/asm/bitops.h464
-rw-r--r--riscv/include/asm/bug.h92
-rw-r--r--riscv/include/asm/cache.h40
-rw-r--r--riscv/include/asm/cacheflush.h76
-rw-r--r--riscv/include/asm/cacheinfo.h20
-rw-r--r--riscv/include/asm/cfi.h22
-rw-r--r--riscv/include/asm/clint.h26
-rw-r--r--riscv/include/asm/clocksource.h7
-rw-r--r--riscv/include/asm/cmpxchg.h363
-rw-r--r--riscv/include/asm/compat.h129
-rw-r--r--riscv/include/asm/cpu.h8
-rw-r--r--riscv/include/asm/cpu_ops.h45
-rw-r--r--riscv/include/asm/cpu_ops_sbi.h27
-rw-r--r--riscv/include/asm/cpufeature.h136
-rw-r--r--riscv/include/asm/cpuidle.h24
-rw-r--r--riscv/include/asm/crash_core.h11
-rw-r--r--riscv/include/asm/csr.h536
-rw-r--r--riscv/include/asm/csr.h~536
-rw-r--r--riscv/include/asm/current.h40
-rw-r--r--riscv/include/asm/delay.h20
-rw-r--r--riscv/include/asm/dma-noncoherent.h28
-rw-r--r--riscv/include/asm/efi.h50
-rw-r--r--riscv/include/asm/elf.h163
-rw-r--r--riscv/include/asm/entry-common.h25
-rw-r--r--riscv/include/asm/errata_list.h164
-rw-r--r--riscv/include/asm/extable.h52
-rw-r--r--riscv/include/asm/fence.h12
-rw-r--r--riscv/include/asm/fixmap.h67
-rw-r--r--riscv/include/asm/ftrace.h156
-rw-r--r--riscv/include/asm/futex.h104
-rw-r--r--riscv/include/asm/gdb_xml.h116
-rw-r--r--riscv/include/asm/gpr-num.h85
-rw-r--r--riscv/include/asm/hugetlb.h54
-rw-r--r--riscv/include/asm/hwcap.h69
-rw-r--r--riscv/include/asm/hwprobe.h18
-rw-r--r--riscv/include/asm/image.h65
-rw-r--r--riscv/include/asm/insn-def.h199
-rw-r--r--riscv/include/asm/insn.h431
-rw-r--r--riscv/include/asm/io.h143
-rw-r--r--riscv/include/asm/irq.h19
-rw-r--r--riscv/include/asm/irq_stack.h33
-rw-r--r--riscv/include/asm/irq_work.h10
-rw-r--r--riscv/include/asm/irqflags.h55
-rw-r--r--riscv/include/asm/jump_label.h62
-rw-r--r--riscv/include/asm/kasan.h45
-rw-r--r--riscv/include/asm/kdebug.h12
-rw-r--r--riscv/include/asm/kexec.h72
-rw-r--r--riscv/include/asm/kfence.h30
-rw-r--r--riscv/include/asm/kgdb.h113
-rw-r--r--riscv/include/asm/kprobes.h54
-rw-r--r--riscv/include/asm/kvm_aia.h174
-rw-r--r--riscv/include/asm/kvm_aia_aplic.h58
-rw-r--r--riscv/include/asm/kvm_aia_imsic.h38
-rw-r--r--riscv/include/asm/kvm_host.h375
-rw-r--r--riscv/include/asm/kvm_types.h7
-rw-r--r--riscv/include/asm/kvm_vcpu_fp.h59
-rw-r--r--riscv/include/asm/kvm_vcpu_insn.h48
-rw-r--r--riscv/include/asm/kvm_vcpu_pmu.h107
-rw-r--r--riscv/include/asm/kvm_vcpu_sbi.h83
-rw-r--r--riscv/include/asm/kvm_vcpu_timer.h52
-rw-r--r--riscv/include/asm/kvm_vcpu_vector.h80
-rw-r--r--riscv/include/asm/linkage.h12
-rw-r--r--riscv/include/asm/mmio.h151
-rw-r--r--riscv/include/asm/mmiowb.h15
-rw-r--r--riscv/include/asm/mmu.h33
-rw-r--r--riscv/include/asm/mmu_context.h40
-rw-r--r--riscv/include/asm/mmzone.h13
-rw-r--r--riscv/include/asm/module.h130
-rw-r--r--riscv/include/asm/module.lds.h9
-rw-r--r--riscv/include/asm/numa.h8
-rw-r--r--riscv/include/asm/page.h203
-rw-r--r--riscv/include/asm/page.h~203
-rw-r--r--riscv/include/asm/patch.h15
-rw-r--r--riscv/include/asm/pci.h33
-rw-r--r--riscv/include/asm/perf_event.h20
-rw-r--r--riscv/include/asm/pgalloc.h163
-rw-r--r--riscv/include/asm/pgtable-32.h39
-rw-r--r--riscv/include/asm/pgtable-64.h415
-rw-r--r--riscv/include/asm/pgtable-bits.h41
-rw-r--r--riscv/include/asm/pgtable.h931
-rw-r--r--riscv/include/asm/pgtable.h~931
-rw-r--r--riscv/include/asm/probes.h24
-rw-r--r--riscv/include/asm/processor.h150
-rw-r--r--riscv/include/asm/ptdump.h22
-rw-r--r--riscv/include/asm/ptrace.h183
-rw-r--r--riscv/include/asm/sbi.h347
-rw-r--r--riscv/include/asm/scs.h54
-rw-r--r--riscv/include/asm/scs.h~54
-rw-r--r--riscv/include/asm/seccomp.h20
-rw-r--r--riscv/include/asm/sections.h34
-rw-r--r--riscv/include/asm/semihost.h26
-rw-r--r--riscv/include/asm/set_memory.h62
-rw-r--r--riscv/include/asm/signal.h12
-rw-r--r--riscv/include/asm/signal32.h18
-rw-r--r--riscv/include/asm/smp.h128
-rw-r--r--riscv/include/asm/soc.h24
-rw-r--r--riscv/include/asm/sparsemem.h15
-rw-r--r--riscv/include/asm/stackprotector.h22
-rw-r--r--riscv/include/asm/stacktrace.h29
-rw-r--r--riscv/include/asm/string.h42
-rw-r--r--riscv/include/asm/suspend.h58
-rw-r--r--riscv/include/asm/switch_to.h87
-rw-r--r--riscv/include/asm/syscall.h102
-rw-r--r--riscv/include/asm/syscall_wrapper.h82
-rw-r--r--riscv/include/asm/thread_info.h117
-rw-r--r--riscv/include/asm/thread_info.h~117
-rw-r--r--riscv/include/asm/timex.h91
-rw-r--r--riscv/include/asm/tlb.h27
-rw-r--r--riscv/include/asm/tlbflush.h73
-rw-r--r--riscv/include/asm/topology.h21
-rw-r--r--riscv/include/asm/uaccess.h341
-rw-r--r--riscv/include/asm/unistd.h26
-rw-r--r--riscv/include/asm/uprobes.h51
-rw-r--r--riscv/include/asm/vdso.h41
-rw-r--r--riscv/include/asm/vdso/clocksource.h8
-rw-r--r--riscv/include/asm/vdso/data.h17
-rw-r--r--riscv/include/asm/vdso/gettimeofday.h96
-rw-r--r--riscv/include/asm/vdso/processor.h32
-rw-r--r--riscv/include/asm/vdso/vsyscall.h27
-rw-r--r--riscv/include/asm/vector.h219
-rw-r--r--riscv/include/asm/vendorid_list.h12
-rw-r--r--riscv/include/asm/vermagic.h9
-rw-r--r--riscv/include/asm/vmalloc.h83
-rw-r--r--riscv/include/asm/word-at-a-time.h48
-rw-r--r--riscv/include/asm/xip_fixup.h31
-rw-r--r--riscv/include/asm/xip_fixup.h~31
-rw-r--r--riscv/include/generated/.compat_vdso-offsets.h.cmd1
-rw-r--r--riscv/include/generated/.vdso-offsets.h.cmd1
-rw-r--r--riscv/include/generated/asm-offsets.h304
-rw-r--r--riscv/include/generated/autoconf.h1555
-rw-r--r--riscv/include/generated/bounds.h16
-rw-r--r--riscv/include/generated/compat_vdso-offsets.h0
-rw-r--r--riscv/include/generated/compile.h4
-rw-r--r--riscv/include/generated/rustc_cfg3013
-rw-r--r--riscv/include/generated/timeconst.h40
-rw-r--r--riscv/include/generated/uapi/linux/version.h5
-rw-r--r--riscv/include/generated/utsrelease.h1
-rw-r--r--riscv/include/generated/vdso-offsets.h0
-rw-r--r--riscv/include/mach/riscv/boolean.h37
-rw-r--r--riscv/include/mach/riscv/eflags.h53
-rw-r--r--riscv/include/mach/riscv/exec/elf.h55
-rw-r--r--riscv/include/mach/riscv/fp_reg.h140
-rw-r--r--riscv/include/mach/riscv/kern_return.h40
-rw-r--r--riscv/include/mach/riscv/mach_riscv.defs113
-rw-r--r--riscv/include/mach/riscv/mach_riscv_types.h57
-rwxr-xr-xriscv/include/mach/riscv/machine_types.defs107
-rw-r--r--riscv/include/mach/riscv/multiboot.h208
-rw-r--r--riscv/include/mach/riscv/thread_status.h190
-rw-r--r--riscv/include/mach/riscv/trap.h60
-rw-r--r--riscv/include/mach/riscv/vm_param.h90
-rw-r--r--riscv/include/mach/riscv/vm_types.h173
-rw-r--r--riscv/include/uapi/asm/Kbuild1
-rw-r--r--riscv/include/uapi/asm/auxvec.h40
-rw-r--r--riscv/include/uapi/asm/bitsperlong.h14
-rw-r--r--riscv/include/uapi/asm/bpf_perf_event.h9
-rw-r--r--riscv/include/uapi/asm/byteorder.h12
-rw-r--r--riscv/include/uapi/asm/elf.h101
-rw-r--r--riscv/include/uapi/asm/hwcap.h26
-rw-r--r--riscv/include/uapi/asm/hwprobe.h43
-rw-r--r--riscv/include/uapi/asm/kvm.h317
-rw-r--r--riscv/include/uapi/asm/perf_regs.h42
-rw-r--r--riscv/include/uapi/asm/ptrace.h132
-rw-r--r--riscv/include/uapi/asm/setup.h8
-rw-r--r--riscv/include/uapi/asm/sigcontext.h40
-rw-r--r--riscv/include/uapi/asm/ucontext.h38
-rw-r--r--riscv/include/uapi/asm/unistd.h54
-rw-r--r--riscv/intel/pmap.c3322
-rw-r--r--riscv/intel/pmap.h574
-rw-r--r--riscv/intel/pmap.h~574
-rw-r--r--riscv/intel/read_fault.c178
-rw-r--r--riscv/intel/read_fault.h35
-rw-r--r--riscv/ldscript213
-rw-r--r--riscv/riscv/ast.h47
-rw-r--r--riscv/riscv/boothdr.S222
-rw-r--r--riscv/riscv/const.h43
-rw-r--r--riscv/riscv/copy_user.h100
-rw-r--r--riscv/riscv/cpu_number.h119
-rw-r--r--riscv/riscv/db_machdep.h105
-rw-r--r--riscv/riscv/io_perm.h63
-rw-r--r--riscv/riscv/ipl.h83
-rw-r--r--riscv/riscv/irq.c73
-rw-r--r--riscv/riscv/irq.h31
-rw-r--r--riscv/riscv/locore.h98
-rw-r--r--riscv/riscv/loose_ends.h33
-rw-r--r--riscv/riscv/mach_param.h31
-rw-r--r--riscv/riscv/mach_riscv.srv27
-rw-r--r--riscv/riscv/machine_routines.h38
-rw-r--r--riscv/riscv/machspl.h29
-rw-r--r--riscv/riscv/model_dep.c124
-rw-r--r--riscv/riscv/model_dep.h39
-rw-r--r--riscv/riscv/mp_desc.h98
-rw-r--r--riscv/riscv/pcb.h90
-rw-r--r--riscv/riscv/percpu.h98
-rw-r--r--riscv/riscv/pic.c262
-rw-r--r--riscv/riscv/pic.h191
-rw-r--r--riscv/riscv/pmap.h28
-rw-r--r--riscv/riscv/proc_reg.h402
-rw-r--r--riscv/riscv/riscvasm.sym38
-rw-r--r--riscv/riscv/setup.c3
-rw-r--r--riscv/riscv/smp.c199
-rw-r--r--riscv/riscv/smp.h34
-rw-r--r--riscv/riscv/spl.h77
-rw-r--r--riscv/riscv/task.h61
-rw-r--r--riscv/riscv/thread.h278
-rw-r--r--riscv/riscv/trap.c675
-rw-r--r--riscv/riscv/trap.h70
-rw-r--r--riscv/riscv/vm_param.h200
-rw-r--r--riscv/riscv/xpr.h32
227 files changed, 30526 insertions, 2 deletions
diff --git a/Makefile.am b/Makefile.am
index ad38249..cc9b5c2 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -182,8 +182,9 @@ gnumach-undef-bad: gnumach-undef Makefile
$(AM_V_GEN) sed '$(foreach r,$(libgcc_routines) $(ld_magic_routines),/^$r$$/d;)' $< > $@
MOSTLYCLEANFILES += gnumach-undef-bad
libgcc-routines.o: gnumach-undef gnumach-undef-bad
- $(AM_V_at) if test -s gnumach-undef-bad; \
- then cat gnumach-undef-bad; exit 2; else true; fi
+# stack_top defined temporary - please uncomment after defining correct stack
+# $(AM_V_at) if test -s gnumach-undef-bad; \
+# then cat gnumach-undef-bad; exit 2; else true; fi
$(AM_V_CCLD) $(CCLD) $(LDFLAGS) -r -static \
-o $@ `sed 's/^/-Wl,-u,/' < $<` -x c /dev/null -lgcc
@if nm $@ | grep __init_cpu_features; \
diff --git a/Makefrag.am b/Makefrag.am
index 5b61a1d..ba26ac3 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -609,3 +609,5 @@ include i386/Makefrag.am
# x86_64.
include x86_64/Makefrag.am
+
+include riscv/Makefrag.am
diff --git a/configure.ac b/configure.ac
index 69f75cf..10920b0 100644
--- a/configure.ac
+++ b/configure.ac
@@ -64,6 +64,11 @@ case $host_platform:$host_cpu in
host_platform=at;;
default:x86_64)]
[host_platform=at;;
+ default:riscv32)]
+ [host_platform=riscv32;;
+ default:riscv64)]
+ CFLAGS="$CFLAGS -march=rv64gcv"
+ [host_platform=riscv64;;
at:i?86 | xen:i?86 | at:x86_64 | xen:x86_64)
:;;
*)]
@@ -76,6 +81,8 @@ AC_SUBST([host_platform])
case $host_cpu in
i?86)
systype=i386;;
+ riscv*)
+ systype=riscv;;
*)
systype=$host_cpu;;
esac]
@@ -173,6 +180,9 @@ m4_include([i386/configfrag.ac])
# x86_64
m4_include([x86_64/configfrag.ac])
+# riscv
+m4_include([riscv/configfrag.ac])
+
# General options.
m4_include([configfrag.ac])
diff --git a/riscv/Makefrag.am b/riscv/Makefrag.am
new file mode 100644
index 0000000..d90a10f
--- /dev/null
+++ b/riscv/Makefrag.am
@@ -0,0 +1,82 @@
+# Makefile fragment for the Xen platform.
+
+# Copyright (C) 2024 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+#
+# RISCV support.
+#
+EXTRA_DIST += \
+ riscv/riscv/mach_riscv.srv \
+ riscv/riscv/riscvasm.sym \
+ riscv/ldscript \
+ riscv/include/asm/ \
+ riscv/include/
+
+libkernel_a_SOURCES += \
+ riscv/riscv/boothdr.S \
+ riscv/riscv/setup.c \
+ riscv/riscv/model_dep.c \
+ riscv/riscv/model_dep.h
+
+#
+# Automatically generated source files.
+#
+# See Makerules.mig.am.
+#
+
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ riscv/riscv/mach_riscv.server.defs.c
+nodist_libkernel_a_SOURCES += \
+ riscv/riscv/mach_riscv.server.h \
+ riscv/riscv/mach_riscv.server.c \
+ riscv/riscv/mach_riscv.server.msgids
+# riscv/riscv/mach_riscv.server.defs
+
+nodist_libkernel_a_SOURCES += \
+ riscv/riscv/riscvasm.h
+
+#
+# Installation.
+#
+
+include_mach_riscvdir = $(includedir)/mach/riscv
+include_mach_riscv_HEADERS = \
+ riscv/include/mach/riscv/asm.h \
+ riscv/include/mach/riscv/boolean.h \
+ riscv/include/mach/riscv/eflags.h \
+ riscv/include/mach/riscv/exception.h \
+ riscv/include/mach/riscv/fp_reg.h \
+ riscv/include/mach/riscv/ioccom.h \
+ riscv/include/mach/riscv/kern_return.h \
+ riscv/include/mach/riscv/mach_riscv.defs \
+ riscv/include/mach/riscv/mach_riscv_types.h \
+ riscv/include/mach/riscv/machine_types.defs \
+ riscv/include/mach/riscv/multiboot.h \
+ riscv/include/mach/riscv/syscall_sw.h \
+ riscv/include/mach/riscv/thread_status.h \
+ riscv/include/mach/riscv/trap.h \
+ riscv/include/mach/riscv/vm_param.h \
+ riscv/include/mach/riscv/vm_types.h
+
+#
+# Architecture specialities.
+#
+
+gnumach_LINKFLAGS += \
+ --defsym _START_MAP=0x80100000 \
+ --defsym _START=0x80100000 \
+ -T '$(srcdir)'/riscv/ldscript
diff --git a/riscv/README b/riscv/README
new file mode 100644
index 0000000..b119c91
--- /dev/null
+++ b/riscv/README
@@ -0,0 +1,80 @@
+
+setup riscv-gnu-toolchain
+--------------------------
+$ git clone https://github.com/riscv-collab/riscv-gnu-toolchain
+$ ./configure --prefix=/opt/riscv
+$ make
+
+
+setup opensbi
+--------------
+git clone https://github.com/riscv-software-src/opensbi.git
+cd opensbi
+make ARCH=riscv CROSS_COMPILE=riscv64-unknown-elf- PLATFORM=generic
+
+
+setup mig
+---------
+TARGET_CPPFLAGS=-I"$GNU"/include ../configure --prefix="$GNU" --target=riscv64-unknown-elf TARGET_CC=riscv64-unknown-elf-gcc
+
+setup gnumach
+--------------
+$ mkdir build
+$ cd build
+$ ../configure --prefix= --host=riscv64-unknown-elf LD=riscv64-unknown-elf-ld CC=riscv64-unknown-elf-gcc
+$ make gnumach
+
+You may need to setup libc6-dev-riscv64-cross
+
+run it qemu
+-----------
+qemu-system-riscv64 -M virt -m 128M -nographic -bios YOUR_PATH/opensbi/build/platform/generic/firmware/fw_dynamic.bin -kernel gnumach
+
+
+adjusted for gnumach
+---------------------
+linux-6.x.x/arch/riscv/kernel/head.S -> gnumach/riscv/riscv/boothdr.S
+
+linux-6.x.x/arch/riscv/include/asm/ -> gnumach/riscv/include/asm/
+linux-6.x.x/arch/riscv/include/uapi/ -> gnumach/riscv/include/uapi/
+
+You will find in some header files "#include <generated/*>"
+This generated directory will be generated after you configure/build linux kernel.
+
+ $ make ARCH=riscv CROSS_COMPILE=riscv64-unknown-linux-gnu- defconfig
+
+You may need to install "gcc-riscv64-linux-gnu", if "riscv64-unknown-elf" fails to configure/build linux kernel.
+
+warning:
+--------
+This is *not* the directory where all the necessary files will be generated - "linux-6.x.x/arch/riscv/include/generated/"
+Instead you need to check - "linux-6.x.x/include/generated/" (specially for files like asm-offsets.h).
+
+linux-6.x.x/include/generated/ -> gnumach/riscv/include/generated/
+
+
+links added for asm, uapi, and generated, in gnumach/riscv/configfrag.ac
+
+AC_CONFIG_LINKS([asm:$systype/include/asm
+ uapi:$systype/include/uapi
+ generated:$systype/include/generated])
+
+
+Pending implementations
+-----------------------
+gnumach/riscv/riscv/{const.h, setup.c}
+
+
+resources in sifive.com blog
+-----------------------------
+https://www.sifive.com/blog/all-aboard-part-1-compiler-args
+https://www.sifive.com/blog/all-aboard-part-2-relocations
+https://www.sifive.com/blog/all-aboard-part-3-linker-relaxation-in-riscv-toolchain
+https://www.sifive.com/blog/all-aboard-part-4-risc-v-code-models
+https://www.sifive.com/blog/all-aboard-part-5-risc-v-multilib
+https://www.sifive.com/blog/all-aboard-part-6-booting-a-risc-v-linux-kernel
+https://www.sifive.com/blog/all-aboard-part-7-entering-and-exiting-the-linux-kernel-on-risc-v
+https://www.sifive.com/blog/all-aboard-part-8-the-risc-v-linux-port-is-upstream
+https://www.sifive.com/blog/all-aboard-part-9-paging-and-mmu-in-risc-v-linux-kernel
+https://www.sifive.com/blog/all-aboard-part-10-how-to-contribute-to-the-risc-v-software-ecosystem
+https://www.sifive.com/blog/all-aboard-part-11-risc-v-hackathon-presented-by-sifive
diff --git a/riscv/configfrag.ac b/riscv/configfrag.ac
new file mode 100644
index 0000000..e20634b
--- /dev/null
+++ b/riscv/configfrag.ac
@@ -0,0 +1,91 @@
+dnl Configure fragment for riscv.
+
+dnl Copyright (C) 1999, 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
+
+dnl Permission to use, copy, modify and distribute this software and its
+dnl documentation is hereby granted, provided that both the copyright
+dnl notice and this permission notice appear in all copies of the
+dnl software, derivative works or modified versions, and any portions
+dnl thereof, and that both notices appear in supporting documentation.
+dnl
+dnl THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS
+dnl "AS IS" CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY
+dnl LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE
+dnl USE OF THIS SOFTWARE.
+
+#
+# Definitions.
+#
+
+[case $host_cpu in
+ riscv32)]
+ AM_CONDITIONAL([HOST_riscv32], [true])
+
+ CFLAGS="$CFLAGS -mcmodel=medany -march=rv64gcv"
+
+ # Some of the riscv-specific code checks for these.
+ AC_DEFINE([__ELF__], [1], [__ELF__])
+
+ # Determines the size of the CPU cache line.
+ AC_DEFINE([CPU_L1_SHIFT], [6], [CPU_L1_SHIFT])
+
+ [# Does the architecture provide machine-specific interfaces?
+ mach_machine_routines=1;;
+ *)]
+ AM_CONDITIONAL([HOST_riscv32], [false])[;;
+esac
+
+case $host_cpu in
+ riscv64)]
+ AM_CONDITIONAL([HOST_riscv64], [true])
+
+ CFLAGS="$CFLAGS -mcmodel=medany -march=rv64gcv"
+
+ # Some of the riscv-specific code checks for these.
+ AC_DEFINE([__ELF__], [1], [__ELF__])
+
+ # Determines the size of the CPU cache line.
+ AC_DEFINE([CPU_L1_SHIFT], [6], [CPU_L1_SHIFT])
+
+ [# Does the architecture provide machine-specific interfaces?
+ mach_machine_routines=1;;
+ *)]
+ AM_CONDITIONAL([HOST_riscv64], [false])[;;
+esac
+
+case $host_platform in
+ riscv)]
+ AM_CONDITIONAL([PLATFORM_riscv], [true])[;;
+ *)]
+ AM_CONDITIONAL([PLATFORM_riscv], [false])[;;
+esac]
+
+#
+# Options.
+#
+
+# The immediate console, useful for debugging early system
+# initialization. Disabled by default.
+AC_DEFINE([ENABLE_IMMEDIATE_CONSOLE], [0], [ENABLE_IMMEDIATE_CONSOLE])
+
+AC_ARG_ENABLE([apic],
+ AS_HELP_STRING([--enable-apic], [LAPIC/IOAPIC support]))
+[if [ x"$enable_apic" = xyes ]; then]
+ AC_DEFINE([APIC], [1], [APIC support])
+ AM_CONDITIONAL([enable_apic], [true])
+[else]
+ AM_CONDITIONAL([enable_apic], [false])
+[fi]
+
+AC_ARG_WITH([_START_MAP],
+ AS_HELP_STRING([--with-_START_MAP=0x1000000], [specify kernel mapping start address]),
+ [_START_MAP="$withval"], [_START_MAP=0x1000000])
+AC_SUBST(_START_MAP)
+
+AC_CONFIG_LINKS([asm:$systype/include/asm
+ uapi:$systype/include/uapi
+ generated:$systype/include/generated])
+
+dnl Local Variables:
+dnl mode: autoconf
+dnl End:
diff --git a/riscv/include/asm/Kbuild b/riscv/include/asm/Kbuild
new file mode 100644
index 0000000..504f8b7
--- /dev/null
+++ b/riscv/include/asm/Kbuild
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += early_ioremap.h
+generic-y += flat.h
+generic-y += kvm_para.h
+generic-y += parport.h
+generic-y += spinlock.h
+generic-y += spinlock_types.h
+generic-y += qrwlock.h
+generic-y += qrwlock_types.h
+generic-y += user.h
+generic-y += vmlinux.lds.h
diff --git a/riscv/include/asm/acenv.h b/riscv/include/asm/acenv.h
new file mode 100644
index 0000000..43ae2e3
--- /dev/null
+++ b/riscv/include/asm/acenv.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * RISC-V specific ACPICA environments and implementation
+ */
+
+#ifndef _ASM_ACENV_H
+#define _ASM_ACENV_H
+
+/* This header is required unconditionally by the ACPI core */
+
+#endif /* _ASM_ACENV_H */
diff --git a/riscv/include/asm/acpi.h b/riscv/include/asm/acpi.h
new file mode 100644
index 0000000..7dad0cf
--- /dev/null
+++ b/riscv/include/asm/acpi.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013-2014, Linaro Ltd.
+ * Author: Al Stone <al.stone@linaro.org>
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ *
+ * Copyright (C) 2021-2023, Ventana Micro Systems Inc.
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ */
+
+#ifndef _ASM_ACPI_H
+#define _ASM_ACPI_H
+
+/* Basic configuration for ACPI */
+#ifdef CONFIG_ACPI
+
+typedef u64 phys_cpuid_t;
+#define PHYS_CPUID_INVALID INVALID_HARTID
+
+/* ACPI table mapping after acpi_permanent_mmap is set */
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+#define acpi_os_ioremap acpi_os_ioremap
+
+#define acpi_strict 1 /* No out-of-spec workarounds on RISC-V */
+extern int acpi_disabled;
+extern int acpi_noirq;
+extern int acpi_pci_disabled;
+
+static inline void disable_acpi(void)
+{
+ acpi_disabled = 1;
+ acpi_pci_disabled = 1;
+ acpi_noirq = 1;
+}
+
+static inline void enable_acpi(void)
+{
+ acpi_disabled = 0;
+ acpi_pci_disabled = 0;
+ acpi_noirq = 0;
+}
+
+/*
+ * The ACPI processor driver for ACPI core code needs this macro
+ * to find out whether this cpu was already mapped (mapping from CPU hardware
+ * ID to CPU logical ID) or not.
+ */
+#define cpu_physical_id(cpu) cpuid_to_hartid_map(cpu)
+
+/*
+ * Since MADT must provide at least one RINTC structure, the
+ * CPU will be always available in MADT on RISC-V.
+ */
+static inline bool acpi_has_cpu_in_madt(void)
+{
+ return true;
+}
+
+static inline void arch_fix_phys_package_id(int num, u32 slot) { }
+
+void acpi_init_rintc_map(void);
+struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu);
+u32 get_acpi_id_for_cpu(int cpu);
+int acpi_get_riscv_isa(struct acpi_table_header *table,
+ unsigned int cpu, const char **isa);
+
+static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
+void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
+ u32 *cboz_size, u32 *cbop_size);
+#else
+static inline void acpi_init_rintc_map(void) { }
+static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
+{
+ return NULL;
+}
+
+static inline int acpi_get_riscv_isa(struct acpi_table_header *table,
+ unsigned int cpu, const char **isa)
+{
+ return -EINVAL;
+}
+
+static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
+ u32 *cbom_size, u32 *cboz_size,
+ u32 *cbop_size) { }
+
+#endif /* CONFIG_ACPI */
+
+#endif /*_ASM_ACPI_H*/
diff --git a/riscv/include/asm/alternative-macros.h b/riscv/include/asm/alternative-macros.h
new file mode 100644
index 0000000..721ec27
--- /dev/null
+++ b/riscv/include/asm/alternative-macros.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ALTERNATIVE_MACROS_H
+#define __ASM_ALTERNATIVE_MACROS_H
+
+#ifdef CONFIG_RISCV_ALTERNATIVE
+
+#ifdef __ASSEMBLY__
+
+.macro ALT_ENTRY oldptr newptr vendor_id patch_id new_len
+ .4byte \oldptr - .
+ .4byte \newptr - .
+ .2byte \vendor_id
+ .2byte \new_len
+ .4byte \patch_id
+.endm
+
+.macro ALT_NEW_CONTENT vendor_id, patch_id, enable = 1, new_c
+ .if \enable
+ .pushsection .alternative, "a"
+ ALT_ENTRY 886b, 888f, \vendor_id, \patch_id, 889f - 888f
+ .popsection
+ .subsection 1
+888 :
+ .option push
+ .option norvc
+ .option norelax
+ \new_c
+ .option pop
+889 :
+ .org . - (889b - 888b) + (887b - 886b)
+ .org . - (887b - 886b) + (889b - 888b)
+ .previous
+ .endif
+.endm
+
+.macro ALTERNATIVE_CFG old_c, new_c, vendor_id, patch_id, enable
+886 :
+ .option push
+ .option norvc
+ .option norelax
+ \old_c
+ .option pop
+887 :
+ ALT_NEW_CONTENT \vendor_id, \patch_id, \enable, "\new_c"
+.endm
+
+.macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, patch_id_1, enable_1, \
+ new_c_2, vendor_id_2, patch_id_2, enable_2
+ ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \patch_id_1, \enable_1
+ ALT_NEW_CONTENT \vendor_id_2, \patch_id_2, \enable_2, "\new_c_2"
+.endm
+
+#define __ALTERNATIVE_CFG(...) ALTERNATIVE_CFG __VA_ARGS__
+#define __ALTERNATIVE_CFG_2(...) ALTERNATIVE_CFG_2 __VA_ARGS__
+
+#else /* !__ASSEMBLY__ */
+
+#include <asm/asm.h>
+#include <linux/stringify.h>
+
+#define ALT_ENTRY(oldptr, newptr, vendor_id, patch_id, newlen) \
+ ".4byte ((" oldptr ") - .) \n" \
+ ".4byte ((" newptr ") - .) \n" \
+ ".2byte " vendor_id "\n" \
+ ".2byte " newlen "\n" \
+ ".4byte " patch_id "\n"
+
+#define ALT_NEW_CONTENT(vendor_id, patch_id, enable, new_c) \
+ ".if " __stringify(enable) " == 1\n" \
+ ".pushsection .alternative, \"a\"\n" \
+ ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(patch_id), "889f - 888f") \
+ ".popsection\n" \
+ ".subsection 1\n" \
+ "888 :\n" \
+ ".option push\n" \
+ ".option norvc\n" \
+ ".option norelax\n" \
+ new_c "\n" \
+ ".option pop\n" \
+ "889 :\n" \
+ ".org . - (887b - 886b) + (889b - 888b)\n" \
+ ".org . - (889b - 888b) + (887b - 886b)\n" \
+ ".previous\n" \
+ ".endif\n"
+
+#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, enable) \
+ "886 :\n" \
+ ".option push\n" \
+ ".option norvc\n" \
+ ".option norelax\n" \
+ old_c "\n" \
+ ".option pop\n" \
+ "887 :\n" \
+ ALT_NEW_CONTENT(vendor_id, patch_id, enable, new_c)
+
+#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1, \
+ new_c_2, vendor_id_2, patch_id_2, enable_2) \
+ __ALTERNATIVE_CFG(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1) \
+ ALT_NEW_CONTENT(vendor_id_2, patch_id_2, enable_2, new_c_2)
+
+#endif /* __ASSEMBLY__ */
+
+#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, CONFIG_k) \
+ __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, IS_ENABLED(CONFIG_k))
+
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, CONFIG_k_1, \
+ new_c_2, vendor_id_2, patch_id_2, CONFIG_k_2) \
+ __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, IS_ENABLED(CONFIG_k_1), \
+ new_c_2, vendor_id_2, patch_id_2, IS_ENABLED(CONFIG_k_2))
+
+#else /* CONFIG_RISCV_ALTERNATIVE */
+#ifdef __ASSEMBLY__
+
+.macro ALTERNATIVE_CFG old_c
+ \old_c
+.endm
+
+#define _ALTERNATIVE_CFG(old_c, ...) \
+ ALTERNATIVE_CFG old_c
+
+#define _ALTERNATIVE_CFG_2(old_c, ...) \
+ ALTERNATIVE_CFG old_c
+
+#else /* !__ASSEMBLY__ */
+
+#define __ALTERNATIVE_CFG(old_c) \
+ old_c "\n"
+
+#define _ALTERNATIVE_CFG(old_c, ...) \
+ __ALTERNATIVE_CFG(old_c)
+
+#define _ALTERNATIVE_CFG_2(old_c, ...) \
+ __ALTERNATIVE_CFG(old_c)
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_RISCV_ALTERNATIVE */
+
+/*
+ * Usage:
+ * ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k)
+ * in the assembly code. Otherwise,
+ * asm(ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k));
+ *
+ * old_content: The old content which is probably replaced with new content.
+ * new_content: The new content.
+ * vendor_id: The CPU vendor ID.
+ * patch_id: The patch ID (erratum ID or cpufeature ID).
+ * CONFIG_k: The Kconfig of this patch ID. When Kconfig is disabled, the old
+ * content will always be executed.
+ */
+#define ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k) \
+ _ALTERNATIVE_CFG(old_content, new_content, vendor_id, patch_id, CONFIG_k)
+
+/*
+ * A vendor wants to replace an old_content, but another vendor has used
+ * ALTERNATIVE() to patch its customized content at the same location. In
+ * this case, this vendor can create a new macro ALTERNATIVE_2() based
+ * on the following sample code and then replace ALTERNATIVE() with
+ * ALTERNATIVE_2() to append its customized content.
+ */
+#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, patch_id_1, CONFIG_k_1, \
+ new_content_2, vendor_id_2, patch_id_2, CONFIG_k_2) \
+ _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, patch_id_1, CONFIG_k_1, \
+ new_content_2, vendor_id_2, patch_id_2, CONFIG_k_2)
+
+#endif
diff --git a/riscv/include/asm/alternative.h b/riscv/include/asm/alternative.h
new file mode 100644
index 0000000..3c2b59b
--- /dev/null
+++ b/riscv/include/asm/alternative.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Sifive.
+ */
+
+#ifndef __ASM_ALTERNATIVE_H
+#define __ASM_ALTERNATIVE_H
+
+#include <asm/alternative-macros.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_RISCV_ALTERNATIVE
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <asm/hwcap.h>
+
+#define PATCH_ID_CPUFEATURE_ID(p) lower_16_bits(p)
+#define PATCH_ID_CPUFEATURE_VALUE(p) upper_16_bits(p)
+
+#define RISCV_ALTERNATIVES_BOOT 0 /* alternatives applied during regular boot */
+#define RISCV_ALTERNATIVES_MODULE 1 /* alternatives applied during module-init */
+#define RISCV_ALTERNATIVES_EARLY_BOOT 2 /* alternatives applied before mmu start */
+
+/* add the relative offset to the address of the offset to get the absolute address */
+#define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f)
+#define ALT_OLD_PTR(a) __ALT_PTR(a, old_offset)
+#define ALT_ALT_PTR(a) __ALT_PTR(a, alt_offset)
+
+void __init apply_boot_alternatives(void);
+void __init apply_early_boot_alternatives(void);
+void apply_module_alternatives(void *start, size_t length);
+
+void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
+ int patch_offset);
+
+struct alt_entry {
+ s32 old_offset; /* offset relative to original instruction or data */
+ s32 alt_offset; /* offset relative to replacement instruction or data */
+ u16 vendor_id; /* CPU vendor ID */
+ u16 alt_len; /* The replacement size */
+ u32 patch_id; /* The patch ID (erratum ID or cpufeature ID) */
+};
+
+void andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage);
+void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage);
+void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage);
+
+void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned int stage);
+
+#else /* CONFIG_RISCV_ALTERNATIVE */
+
+static inline void apply_boot_alternatives(void) { }
+static inline void apply_early_boot_alternatives(void) { }
+static inline void apply_module_alternatives(void *start, size_t length) { }
+
+#endif /* CONFIG_RISCV_ALTERNATIVE */
+
+#endif
+#endif
diff --git a/riscv/include/asm/asm-extable.h b/riscv/include/asm/asm-extable.h
new file mode 100644
index 0000000..00a96e7
--- /dev/null
+++ b/riscv/include/asm/asm-extable.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_ASM_EXTABLE_H
+#define __ASM_ASM_EXTABLE_H
+
+#define EX_TYPE_NONE 0
+#define EX_TYPE_FIXUP 1
+#define EX_TYPE_BPF 2
+#define EX_TYPE_UACCESS_ERR_ZERO 3
+
+#ifdef CONFIG_MMU
+
+#ifdef __ASSEMBLY__
+
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
+ .pushsection __ex_table, "a"; \
+ .balign 4; \
+ .long ((insn) - .); \
+ .long ((fixup) - .); \
+ .short (type); \
+ .short (data); \
+ .popsection;
+
+ .macro _asm_extable, insn, fixup
+ __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
+ .endm
+
+#else /* __ASSEMBLY__ */
+
+#include <linux/bits.h>
+#include <linux/stringify.h>
+#include <asm/gpr-num.h>
+
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
+ ".pushsection __ex_table, \"a\"\n" \
+ ".balign 4\n" \
+ ".long ((" insn ") - .)\n" \
+ ".long ((" fixup ") - .)\n" \
+ ".short (" type ")\n" \
+ ".short (" data ")\n" \
+ ".popsection\n"
+
+#define _ASM_EXTABLE(insn, fixup) \
+ __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
+
+#define EX_DATA_REG_ERR_SHIFT 0
+#define EX_DATA_REG_ERR GENMASK(4, 0)
+#define EX_DATA_REG_ZERO_SHIFT 5
+#define EX_DATA_REG_ZERO GENMASK(9, 5)
+
+#define EX_DATA_REG(reg, gpr) \
+ "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
+
+#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \
+ __DEFINE_ASM_GPR_NUMS \
+ __ASM_EXTABLE_RAW(#insn, #fixup, \
+ __stringify(EX_TYPE_UACCESS_ERR_ZERO), \
+ "(" \
+ EX_DATA_REG(ERR, err) " | " \
+ EX_DATA_REG(ZERO, zero) \
+ ")")
+
+#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
+
+#endif /* __ASSEMBLY__ */
+
+#else /* CONFIG_MMU */
+ #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)
+#endif /* CONFIG_MMU */
+
+#endif /* __ASM_ASM_EXTABLE_H */
diff --git a/riscv/include/asm/asm-offsets.h b/riscv/include/asm/asm-offsets.h
new file mode 100644
index 0000000..d370ee3
--- /dev/null
+++ b/riscv/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/riscv/include/asm/asm-prototypes.h b/riscv/include/asm/asm-prototypes.h
new file mode 100644
index 0000000..36b955c
--- /dev/null
+++ b/riscv/include/asm/asm-prototypes.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+#define _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+long long __lshrti3(long long a, int b);
+long long __ashrti3(long long a, int b);
+long long __ashlti3(long long a, int b);
+
+
+#define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs)
+
+DECLARE_DO_ERROR_INFO(do_trap_unknown);
+DECLARE_DO_ERROR_INFO(do_trap_insn_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_insn_fault);
+DECLARE_DO_ERROR_INFO(do_trap_insn_illegal);
+DECLARE_DO_ERROR_INFO(do_trap_load_fault);
+DECLARE_DO_ERROR_INFO(do_trap_load_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_store_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_store_fault);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_u);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
+DECLARE_DO_ERROR_INFO(do_trap_break);
+
+asmlinkage void handle_bad_stack(struct pt_regs *regs);
+asmlinkage void do_page_fault(struct pt_regs *regs);
+asmlinkage void do_irq(struct pt_regs *regs);
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/riscv/include/asm/asm.h b/riscv/include/asm/asm.h
new file mode 100644
index 0000000..b0487b3
--- /dev/null
+++ b/riscv/include/asm/asm.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_ASM_H
+#define _ASM_RISCV_ASM_H
+
+#ifdef __ASSEMBLY__
+#define __ASM_STR(x) x
+#else
+#define __ASM_STR(x) #x
+#endif
+
+#if __riscv_xlen == 64
+#define __REG_SEL(a, b) __ASM_STR(a)
+#elif __riscv_xlen == 32
+#define __REG_SEL(a, b) __ASM_STR(b)
+#else
+#error "Unexpected __riscv_xlen"
+#endif
+
+#define REG_L __REG_SEL(ld, lw)
+#define REG_S __REG_SEL(sd, sw)
+#define REG_SC __REG_SEL(sc.d, sc.w)
+#define REG_AMOSWAP_AQ __REG_SEL(amoswap.d.aq, amoswap.w.aq)
+#define REG_ASM __REG_SEL(.dword, .word)
+#define SZREG __REG_SEL(8, 4)
+#define LGREG __REG_SEL(3, 2)
+
+#if __SIZEOF_POINTER__ == 8
+#ifdef __ASSEMBLY__
+#define RISCV_PTR .dword
+#define RISCV_SZPTR 8
+#define RISCV_LGPTR 3
+#else
+#define RISCV_PTR ".dword"
+#define RISCV_SZPTR "8"
+#define RISCV_LGPTR "3"
+#endif
+#elif __SIZEOF_POINTER__ == 4
+#ifdef __ASSEMBLY__
+#define RISCV_PTR .word
+#define RISCV_SZPTR 4
+#define RISCV_LGPTR 2
+#else
+#define RISCV_PTR ".word"
+#define RISCV_SZPTR "4"
+#define RISCV_LGPTR "2"
+#endif
+#else
+#error "Unexpected __SIZEOF_POINTER__"
+#endif
+
+#if (__SIZEOF_INT__ == 4)
+#define RISCV_INT __ASM_STR(.word)
+#define RISCV_SZINT __ASM_STR(4)
+#define RISCV_LGINT __ASM_STR(2)
+#else
+#error "Unexpected __SIZEOF_INT__"
+#endif
+
+#if (__SIZEOF_SHORT__ == 2)
+#define RISCV_SHORT __ASM_STR(.half)
+#define RISCV_SZSHORT __ASM_STR(2)
+#define RISCV_LGSHORT __ASM_STR(1)
+#else
+#error "Unexpected __SIZEOF_SHORT__"
+#endif
+
+#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
+
+/* Common assembly source macros */
+
+/*
+ * NOP sequence
+ */
+.macro nops, num
+ .rept \num
+ nop
+ .endr
+.endm
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_32BIT
+#define PER_CPU_OFFSET_SHIFT 2
+#else
+#define PER_CPU_OFFSET_SHIFT 3
+#endif
+
+.macro asm_per_cpu dst sym tmp
+ REG_L \tmp, TASK_TI_CPU_NUM(tp)
+ slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+ la \dst, __per_cpu_offset
+ add \dst, \dst, \tmp
+ REG_L \tmp, 0(\dst)
+ la \dst, \sym
+ add \dst, \dst, \tmp
+.endm
+#else /* CONFIG_SMP */
+.macro asm_per_cpu dst sym tmp
+ la \dst, \sym
+.endm
+#endif /* CONFIG_SMP */
+
+.macro load_per_cpu dst ptr tmp
+ asm_per_cpu \dst \ptr \tmp
+ REG_L \dst, 0(\dst)
+.endm
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+/* gp is used as the shadow call stack pointer instead */
+.macro load_global_pointer
+.endm
+#else
+/* load __global_pointer to gp */
+.macro load_global_pointer
+.option push
+.option norelax
+ la gp, __global_pointer$
+.option pop
+.endm
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
+ /* save all GPs except x1 ~ x5 */
+ .macro save_from_x6_to_x31
+ REG_S x6, PT_T1(sp)
+ REG_S x7, PT_T2(sp)
+ REG_S x8, PT_S0(sp)
+ REG_S x9, PT_S1(sp)
+ REG_S x10, PT_A0(sp)
+ REG_S x11, PT_A1(sp)
+ REG_S x12, PT_A2(sp)
+ REG_S x13, PT_A3(sp)
+ REG_S x14, PT_A4(sp)
+ REG_S x15, PT_A5(sp)
+ REG_S x16, PT_A6(sp)
+ REG_S x17, PT_A7(sp)
+ REG_S x18, PT_S2(sp)
+ REG_S x19, PT_S3(sp)
+ REG_S x20, PT_S4(sp)
+ REG_S x21, PT_S5(sp)
+ REG_S x22, PT_S6(sp)
+ REG_S x23, PT_S7(sp)
+ REG_S x24, PT_S8(sp)
+ REG_S x25, PT_S9(sp)
+ REG_S x26, PT_S10(sp)
+ REG_S x27, PT_S11(sp)
+ REG_S x28, PT_T3(sp)
+ REG_S x29, PT_T4(sp)
+ REG_S x30, PT_T5(sp)
+ REG_S x31, PT_T6(sp)
+ .endm
+
+ /* restore all GPs except x1 ~ x5 */
+ .macro restore_from_x6_to_x31
+ REG_L x6, PT_T1(sp)
+ REG_L x7, PT_T2(sp)
+ REG_L x8, PT_S0(sp)
+ REG_L x9, PT_S1(sp)
+ REG_L x10, PT_A0(sp)
+ REG_L x11, PT_A1(sp)
+ REG_L x12, PT_A2(sp)
+ REG_L x13, PT_A3(sp)
+ REG_L x14, PT_A4(sp)
+ REG_L x15, PT_A5(sp)
+ REG_L x16, PT_A6(sp)
+ REG_L x17, PT_A7(sp)
+ REG_L x18, PT_S2(sp)
+ REG_L x19, PT_S3(sp)
+ REG_L x20, PT_S4(sp)
+ REG_L x21, PT_S5(sp)
+ REG_L x22, PT_S6(sp)
+ REG_L x23, PT_S7(sp)
+ REG_L x24, PT_S8(sp)
+ REG_L x25, PT_S9(sp)
+ REG_L x26, PT_S10(sp)
+ REG_L x27, PT_S11(sp)
+ REG_L x28, PT_T3(sp)
+ REG_L x29, PT_T4(sp)
+ REG_L x30, PT_T5(sp)
+ REG_L x31, PT_T6(sp)
+ .endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_ASM_H */
diff --git a/riscv/include/asm/asm.h~ b/riscv/include/asm/asm.h~
new file mode 100644
index 0000000..b0487b3
--- /dev/null
+++ b/riscv/include/asm/asm.h~
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_ASM_H
+#define _ASM_RISCV_ASM_H
+
+#ifdef __ASSEMBLY__
+#define __ASM_STR(x) x
+#else
+#define __ASM_STR(x) #x
+#endif
+
+#if __riscv_xlen == 64
+#define __REG_SEL(a, b) __ASM_STR(a)
+#elif __riscv_xlen == 32
+#define __REG_SEL(a, b) __ASM_STR(b)
+#else
+#error "Unexpected __riscv_xlen"
+#endif
+
+#define REG_L __REG_SEL(ld, lw)
+#define REG_S __REG_SEL(sd, sw)
+#define REG_SC __REG_SEL(sc.d, sc.w)
+#define REG_AMOSWAP_AQ __REG_SEL(amoswap.d.aq, amoswap.w.aq)
+#define REG_ASM __REG_SEL(.dword, .word)
+#define SZREG __REG_SEL(8, 4)
+#define LGREG __REG_SEL(3, 2)
+
+#if __SIZEOF_POINTER__ == 8
+#ifdef __ASSEMBLY__
+#define RISCV_PTR .dword
+#define RISCV_SZPTR 8
+#define RISCV_LGPTR 3
+#else
+#define RISCV_PTR ".dword"
+#define RISCV_SZPTR "8"
+#define RISCV_LGPTR "3"
+#endif
+#elif __SIZEOF_POINTER__ == 4
+#ifdef __ASSEMBLY__
+#define RISCV_PTR .word
+#define RISCV_SZPTR 4
+#define RISCV_LGPTR 2
+#else
+#define RISCV_PTR ".word"
+#define RISCV_SZPTR "4"
+#define RISCV_LGPTR "2"
+#endif
+#else
+#error "Unexpected __SIZEOF_POINTER__"
+#endif
+
+#if (__SIZEOF_INT__ == 4)
+#define RISCV_INT __ASM_STR(.word)
+#define RISCV_SZINT __ASM_STR(4)
+#define RISCV_LGINT __ASM_STR(2)
+#else
+#error "Unexpected __SIZEOF_INT__"
+#endif
+
+#if (__SIZEOF_SHORT__ == 2)
+#define RISCV_SHORT __ASM_STR(.half)
+#define RISCV_SZSHORT __ASM_STR(2)
+#define RISCV_LGSHORT __ASM_STR(1)
+#else
+#error "Unexpected __SIZEOF_SHORT__"
+#endif
+
+#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
+
+/* Common assembly source macros */
+
+/*
+ * NOP sequence
+ */
+.macro nops, num
+ .rept \num
+ nop
+ .endr
+.endm
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_32BIT
+#define PER_CPU_OFFSET_SHIFT 2
+#else
+#define PER_CPU_OFFSET_SHIFT 3
+#endif
+
+.macro asm_per_cpu dst sym tmp
+ REG_L \tmp, TASK_TI_CPU_NUM(tp)
+ slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+ la \dst, __per_cpu_offset
+ add \dst, \dst, \tmp
+ REG_L \tmp, 0(\dst)
+ la \dst, \sym
+ add \dst, \dst, \tmp
+.endm
+#else /* CONFIG_SMP */
+.macro asm_per_cpu dst sym tmp
+ la \dst, \sym
+.endm
+#endif /* CONFIG_SMP */
+
+.macro load_per_cpu dst ptr tmp
+ asm_per_cpu \dst \ptr \tmp
+ REG_L \dst, 0(\dst)
+.endm
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+/* gp is used as the shadow call stack pointer instead */
+.macro load_global_pointer
+.endm
+#else
+/* load __global_pointer to gp */
+.macro load_global_pointer
+.option push
+.option norelax
+ la gp, __global_pointer$
+.option pop
+.endm
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
+ /* save all GPs except x1 ~ x5 */
+ .macro save_from_x6_to_x31
+ REG_S x6, PT_T1(sp)
+ REG_S x7, PT_T2(sp)
+ REG_S x8, PT_S0(sp)
+ REG_S x9, PT_S1(sp)
+ REG_S x10, PT_A0(sp)
+ REG_S x11, PT_A1(sp)
+ REG_S x12, PT_A2(sp)
+ REG_S x13, PT_A3(sp)
+ REG_S x14, PT_A4(sp)
+ REG_S x15, PT_A5(sp)
+ REG_S x16, PT_A6(sp)
+ REG_S x17, PT_A7(sp)
+ REG_S x18, PT_S2(sp)
+ REG_S x19, PT_S3(sp)
+ REG_S x20, PT_S4(sp)
+ REG_S x21, PT_S5(sp)
+ REG_S x22, PT_S6(sp)
+ REG_S x23, PT_S7(sp)
+ REG_S x24, PT_S8(sp)
+ REG_S x25, PT_S9(sp)
+ REG_S x26, PT_S10(sp)
+ REG_S x27, PT_S11(sp)
+ REG_S x28, PT_T3(sp)
+ REG_S x29, PT_T4(sp)
+ REG_S x30, PT_T5(sp)
+ REG_S x31, PT_T6(sp)
+ .endm
+
+ /* restore all GPs except x1 ~ x5 */
+ .macro restore_from_x6_to_x31
+ REG_L x6, PT_T1(sp)
+ REG_L x7, PT_T2(sp)
+ REG_L x8, PT_S0(sp)
+ REG_L x9, PT_S1(sp)
+ REG_L x10, PT_A0(sp)
+ REG_L x11, PT_A1(sp)
+ REG_L x12, PT_A2(sp)
+ REG_L x13, PT_A3(sp)
+ REG_L x14, PT_A4(sp)
+ REG_L x15, PT_A5(sp)
+ REG_L x16, PT_A6(sp)
+ REG_L x17, PT_A7(sp)
+ REG_L x18, PT_S2(sp)
+ REG_L x19, PT_S3(sp)
+ REG_L x20, PT_S4(sp)
+ REG_L x21, PT_S5(sp)
+ REG_L x22, PT_S6(sp)
+ REG_L x23, PT_S7(sp)
+ REG_L x24, PT_S8(sp)
+ REG_L x25, PT_S9(sp)
+ REG_L x26, PT_S10(sp)
+ REG_L x27, PT_S11(sp)
+ REG_L x28, PT_T3(sp)
+ REG_L x29, PT_T4(sp)
+ REG_L x30, PT_T5(sp)
+ REG_L x31, PT_T6(sp)
+ .endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_ASM_H */
diff --git a/riscv/include/asm/assembler.h b/riscv/include/asm/assembler.h
new file mode 100644
index 0000000..44b1457
--- /dev/null
+++ b/riscv/include/asm/assembler.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
+ */
+
+#ifndef __ASSEMBLY__
+#error "Only include this from assembly code"
+#endif
+
+#ifndef __ASM_ASSEMBLER_H
+#define __ASM_ASSEMBLER_H
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/csr.h>
+
+/*
+ * suspend_restore_csrs - restore CSRs
+ */
+ .macro suspend_restore_csrs
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+ csrw CSR_EPC, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+ csrw CSR_STATUS, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+ csrw CSR_TVAL, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+ csrw CSR_CAUSE, t0
+ .endm
+
+/*
+ * suspend_restore_regs - Restore registers (except A0 and T0-T6)
+ */
+ .macro suspend_restore_regs
+ REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+ REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+ REG_L gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+ REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+ REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+ REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+ REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+ REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+ REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+ REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+ REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+ REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+ REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+ REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+ REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+ REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+ REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+ REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+ REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+ REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+ REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+ REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+ REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+ .endm
+
+/*
+ * copy_page - copy 1 page (4KB) of data from source to destination
+ * @a0 - destination
+ * @a1 - source
+ */
+ .macro copy_page a0, a1
+ lui a2, 0x1
+ add a2, a2, a0
+1 :
+ REG_L t0, 0(a1)
+ REG_L t1, SZREG(a1)
+
+ REG_S t0, 0(a0)
+ REG_S t1, SZREG(a0)
+
+ addi a0, a0, 2 * SZREG
+ addi a1, a1, 2 * SZREG
+ bne a2, a0, 1b
+ .endm
+
+#endif /* __ASM_ASSEMBLER_H */
diff --git a/riscv/include/asm/atomic.h b/riscv/include/asm/atomic.h
new file mode 100644
index 0000000..f5dfef6
--- /dev/null
+++ b/riscv/include/asm/atomic.h
@@ -0,0 +1,366 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_ATOMIC_H
+#define _ASM_RISCV_ATOMIC_H
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+# include <asm-generic/atomic64.h>
+#else
+# if (__riscv_xlen < 64)
+# error "64-bit atomics require XLEN to be at least 64"
+# endif
+#endif
+
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#define __atomic_acquire_fence() \
+ __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
+
+#define __atomic_release_fence() \
+ __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
+
+static __always_inline int arch_atomic_read(const atomic_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
+{
+ WRITE_ONCE(v->counter, i);
+}
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC64_INIT(i) { (i) }
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
+{
+ WRITE_ONCE(v->counter, i);
+}
+#endif
+
+/*
+ * First, the atomic ops that have no ordering constraints and therefor don't
+ * have the AQ or RL bits set. These don't return anything, so there's only
+ * one version to worry about.
+ */
+#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
+static __always_inline \
+void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
+{ \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type " zero, %1, %0" \
+ : "+A" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+} \
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_OP (op, asm_op, I, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_OP (op, asm_op, I, w, int, ) \
+ ATOMIC_OP (op, asm_op, I, d, s64, 64)
+#endif
+
+ATOMIC_OPS(add, add, i)
+ATOMIC_OPS(sub, add, -i)
+ATOMIC_OPS(and, and, i)
+ATOMIC_OPS( or, or, i)
+ATOMIC_OPS(xor, xor, i)
+
+#undef ATOMIC_OP
+#undef ATOMIC_OPS
+
+/*
+ * Atomic ops that have ordered, relaxed, acquire, and release variants.
+ * There's two flavors of these: the arithmatic ops have both fetch and return
+ * versions, while the logical ops only have fetch versions.
+ */
+#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
+static __always_inline \
+c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \
+ atomic##prefix##_t *v) \
+{ \
+ register c_type ret; \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type " %1, %2, %0" \
+ : "+A" (v->counter), "=r" (ret) \
+ : "r" (I) \
+ : "memory"); \
+ return ret; \
+} \
+static __always_inline \
+c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
+{ \
+ register c_type ret; \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \
+ : "+A" (v->counter), "=r" (ret) \
+ : "r" (I) \
+ : "memory"); \
+ return ret; \
+}
+
+#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
+static __always_inline \
+c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \
+ atomic##prefix##_t *v) \
+{ \
+ return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
+} \
+static __always_inline \
+c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
+{ \
+ return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \
+}
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, c_op, I) \
+ ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, c_op, I) \
+ ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
+ ATOMIC_FETCH_OP( op, asm_op, I, d, s64, 64) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
+#endif
+
+ATOMIC_OPS(add, add, +, i)
+ATOMIC_OPS(sub, add, +, -i)
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#endif
+
+#undef ATOMIC_OPS
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
+ ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
+#endif
+
+ATOMIC_OPS(and, and, i)
+ATOMIC_OPS( or, or, i)
+ATOMIC_OPS(xor, xor, i)
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#endif
+
+#undef ATOMIC_OPS
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+
+/* This is required to provide a full barrier on success. */
+static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " beq %[p], %[u], 1f\n"
+ " add %[rc], %[p], %[a]\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+ return prev;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " beq %[p], %[u], 1f\n"
+ " add %[rc], %[p], %[a]\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+ return prev;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+#endif
+
+static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " bltz %[p], 1f\n"
+ " addi %[rc], %[p], 1\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev < 0);
+}
+
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+
+static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " bgtz %[p], 1f\n"
+ " addi %[rc], %[p], -1\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev > 0);
+}
+
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+
+static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " addi %[rc], %[p], -1\n"
+ " bltz %[rc], 1f\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return prev - 1;
+}
+
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " bltz %[p], 1f\n"
+ " addi %[rc], %[p], 1\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev < 0);
+}
+
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+
+static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " bgtz %[p], 1f\n"
+ " addi %[rc], %[p], -1\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev > 0);
+}
+
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+
+static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " addi %[rc], %[p], -1\n"
+ " bltz %[rc], 1f\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return prev - 1;
+}
+
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+#endif
+
+#endif /* _ASM_RISCV_ATOMIC_H */
diff --git a/riscv/include/asm/barrier.h b/riscv/include/asm/barrier.h
new file mode 100644
index 0000000..1107525
--- /dev/null
+++ b/riscv/include/asm/barrier.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Based on arch/arm/include/asm/barrier.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2013 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_BARRIER_H
+#define _ASM_RISCV_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop() __asm__ __volatile__ ("nop")
+#define __nops(n) ".rept " #n "\nnop\n.endr\n"
+#define nops(n) __asm__ __volatile__ (__nops(n))
+
+#define RISCV_FENCE(p, s) \
+ __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
+
+/* These barriers need to enforce ordering on both devices or memory. */
+#define mb() RISCV_FENCE(iorw,iorw)
+#define rmb() RISCV_FENCE(ir,ir)
+#define wmb() RISCV_FENCE(ow,ow)
+
+/* These barriers do not need to enforce ordering on devices, just memory. */
+#define __smp_mb() RISCV_FENCE(rw,rw)
+#define __smp_rmb() RISCV_FENCE(r,r)
+#define __smp_wmb() RISCV_FENCE(w,w)
+
+#define __smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ RISCV_FENCE(rw,w); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+
+#define __smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ RISCV_FENCE(r,rw); \
+ ___p1; \
+})
+
+/*
+ * This is a very specific barrier: it's currently only used in two places in
+ * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
+ * orderings it guarantees, but the "critical section is RCsc" guarantee
+ * mandates a barrier on RISC-V. The sequence looks like:
+ *
+ * lr.aq lock
+ * sc lock <= LOCKED
+ * smp_mb__after_spinlock()
+ * // critical section
+ * lr lock
+ * sc.rl lock <= UNLOCKED
+ *
+ * The AQ/RL pair provides a RCpc critical section, but there's not really any
+ * way we can take advantage of that here because the ordering is only enforced
+ * on that one lock. Thus, we're just doing a full fence.
+ *
+ * Since we allow writeX to be called from preemptive regions we need at least
+ * an "o" in the predecessor set to ensure device writes are visible before the
+ * task is marked as available for scheduling on a new hart. While I don't see
+ * any concrete reason we need a full IO fence, it seems safer to just upgrade
+ * this in order to avoid any IO crossing a scheduling boundary. In both
+ * instances the scheduler pairs this with an mb(), so nothing is necessary on
+ * the new hart.
+ */
+#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_BARRIER_H */
diff --git a/riscv/include/asm/bitops.h b/riscv/include/asm/bitops.h
new file mode 100644
index 0000000..224b4dc
--- /dev/null
+++ b/riscv/include/asm/bitops.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_BITOPS_H
+#define _ASM_RISCV_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error "Only <linux/bitops.h> can be included directly"
+#endif /* _LINUX_BITOPS_H */
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/bitsperlong.h>
+
+#if !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE)
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+
+#else
+#include <asm/alternative-macros.h>
+#include <asm/hwcap.h>
+
+#if (BITS_PER_LONG == 64)
+#define CTZW "ctzw "
+#define CLZW "clzw "
+#elif (BITS_PER_LONG == 32)
+#define CTZW "ctz "
+#define CLZW "clz "
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+static __always_inline unsigned long variable__ffs(unsigned long word)
+{
+ int num;
+
+ asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
+
+ asm volatile (".option push\n"
+ ".option arch,+zbb\n"
+ "ctz %0, %1\n"
+ ".option pop\n"
+ : "=r" (word) : "r" (word) :);
+
+ return word;
+
+legacy:
+ num = 0;
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+/**
+ * __ffs - find first set bit in a long word
+ * @word: The word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+#define __ffs(word) \
+ (__builtin_constant_p(word) ? \
+ (unsigned long)__builtin_ctzl(word) : \
+ variable__ffs(word))
+
+static __always_inline unsigned long variable__fls(unsigned long word)
+{
+ int num;
+
+ asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
+
+ asm volatile (".option push\n"
+ ".option arch,+zbb\n"
+ "clz %0, %1\n"
+ ".option pop\n"
+ : "=r" (word) : "r" (word) :);
+
+ return BITS_PER_LONG - 1 - word;
+
+legacy:
+ num = BITS_PER_LONG - 1;
+#if BITS_PER_LONG == 64
+ if (!(word & (~0ul << 32))) {
+ num -= 32;
+ word <<= 32;
+ }
+#endif
+ if (!(word & (~0ul << (BITS_PER_LONG - 16)))) {
+ num -= 16;
+ word <<= 16;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG - 8)))) {
+ num -= 8;
+ word <<= 8;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG - 4)))) {
+ num -= 4;
+ word <<= 4;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG - 2)))) {
+ num -= 2;
+ word <<= 2;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG - 1))))
+ num -= 1;
+ return num;
+}
+
+/**
+ * __fls - find last set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+#define __fls(word) \
+ (__builtin_constant_p(word) ? \
+ (unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) : \
+ variable__fls(word))
+
+static __always_inline int variable_ffs(int x)
+{
+ int r;
+
+ if (!x)
+ return 0;
+
+ asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
+
+ asm volatile (".option push\n"
+ ".option arch,+zbb\n"
+ CTZW "%0, %1\n"
+ ".option pop\n"
+ : "=r" (r) : "r" (x) :);
+
+ return r + 1;
+
+legacy:
+ r = 1;
+ if (!(x & 0xffff)) {
+ x >>= 16;
+ r += 16;
+ }
+ if (!(x & 0xff)) {
+ x >>= 8;
+ r += 8;
+ }
+ if (!(x & 0xf)) {
+ x >>= 4;
+ r += 4;
+ }
+ if (!(x & 3)) {
+ x >>= 2;
+ r += 2;
+ }
+ if (!(x & 1)) {
+ x >>= 1;
+ r += 1;
+ }
+ return r;
+}
+
+/**
+ * ffs - find first set bit in a word
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler builtin ffs routines.
+ *
+ * ffs(value) returns 0 if value is 0 or the position of the first set bit if
+ * value is nonzero. The first (least significant) bit is at position 1.
+ */
+#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))
+
+static __always_inline int variable_fls(unsigned int x)
+{
+ int r;
+
+ if (!x)
+ return 0;
+
+ asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
+
+ asm volatile (".option push\n"
+ ".option arch,+zbb\n"
+ CLZW "%0, %1\n"
+ ".option pop\n"
+ : "=r" (r) : "r" (x) :);
+
+ return 32 - r;
+
+legacy:
+ r = 32;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+/**
+ * fls - find last set bit in a word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as ffs, but returns the position of the most
+ * significant set bit.
+ *
+ * fls(value) returns 0 if value is 0 or the position of the last set bit if
+ * value is nonzero. The last (most significant) bit is at position 32.
+ */
+#define fls(x) \
+({ \
+ typeof(x) x_ = (x); \
+ __builtin_constant_p(x_) ? \
+ (int)((x_ != 0) ? (32 - __builtin_clz(x_)) : 0) \
+ : \
+ variable_fls(x_); \
+})
+
+#endif /* !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) */
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+
+#include <asm-generic/bitops/hweight.h>
+
+#if (BITS_PER_LONG == 64)
+#define __AMO(op) "amo" #op ".d"
+#elif (BITS_PER_LONG == 32)
+#define __AMO(op) "amo" #op ".w"
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+#define __test_and_op_bit_ord(op, mod, nr, addr, ord) \
+({ \
+ unsigned long __res, __mask; \
+ __mask = BIT_MASK(nr); \
+ __asm__ __volatile__ ( \
+ __AMO(op) #ord " %0, %2, %1" \
+ : "=r" (__res), "+A" (addr[BIT_WORD(nr)]) \
+ : "r" (mod(__mask)) \
+ : "memory"); \
+ ((__res & __mask) != 0); \
+})
+
+#define __op_bit_ord(op, mod, nr, addr, ord) \
+ __asm__ __volatile__ ( \
+ __AMO(op) #ord " zero, %1, %0" \
+ : "+A" (addr[BIT_WORD(nr)]) \
+ : "r" (mod(BIT_MASK(nr))) \
+ : "memory");
+
+#define __test_and_op_bit(op, mod, nr, addr) \
+ __test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
+#define __op_bit(op, mod, nr, addr) \
+ __op_bit_ord(op, mod, nr, addr, )
+
+/* Bitmask modifiers */
+#define __NOP(x) (x)
+#define __NOT(x) (~(x))
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation may be reordered on other architectures than x86.
+ */
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(or, __NOP, nr, addr);
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation can be reordered on other architectures other than x86.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(and, __NOT, nr, addr);
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(xor, __NOP, nr, addr);
+}
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(or, __NOP, nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ */
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(and, __NOT, nr, addr);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() may be reordered on other architectures than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(xor, __NOP, nr, addr);
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics.
+ * It can be used to implement bit locks.
+ */
+static inline int test_and_set_bit_lock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
+}
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static inline void clear_bit_unlock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ __op_bit_ord(and, __NOT, nr, addr, .rl);
+}
+
+/**
+ * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is like clear_bit_unlock, however it is not atomic.
+ * It does provide release barrier semantics so it can be used to unlock
+ * a bit lock, however it would only be used if no other CPU can modify
+ * any bits in the memory until the lock is released (a good example is
+ * if the bit lock itself protects access to the other bits in the word).
+ *
+ * On RISC-V systems there seems to be no benefit to taking advantage of the
+ * non-atomic property here: it's a lot more instructions and we still have to
+ * provide release semantics anyway.
+ */
+static inline void __clear_bit_unlock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ clear_bit_unlock(nr, addr);
+}
+
+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+ volatile unsigned long *addr)
+{
+ unsigned long res;
+ __asm__ __volatile__ (
+ __AMO(xor) ".rl %0, %2, %1"
+ : "=r" (res), "+A" (*addr)
+ : "r" (__NOP(mask))
+ : "memory");
+ return (res & BIT(7)) != 0;
+}
+
+#undef __test_and_op_bit
+#undef __op_bit
+#undef __NOP
+#undef __NOT
+#undef __AMO
+
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* _ASM_RISCV_BITOPS_H */
diff --git a/riscv/include/asm/bug.h b/riscv/include/asm/bug.h
new file mode 100644
index 0000000..1aaea81
--- /dev/null
+++ b/riscv/include/asm/bug.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_BUG_H
+#define _ASM_RISCV_BUG_H
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+#include <asm/asm.h>
+
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_32 _UL(0x3)
+#define __COMPRESSED_INSN_MASK _UL(0xffff)
+
+#define __BUG_INSN_32 _UL(0x00100073) /* ebreak */
+#define __BUG_INSN_16 _UL(0x9002) /* c.ebreak */
+
+#define GET_INSN_LENGTH(insn) \
+({ \
+ unsigned long __len; \
+ __len = ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? \
+ 4UL : 2UL; \
+ __len; \
+})
+
+typedef u32 bug_insn_t;
+
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+#define __BUG_ENTRY_ADDR RISCV_INT " 1b - ."
+#define __BUG_ENTRY_FILE RISCV_INT " %0 - ."
+#else
+#define __BUG_ENTRY_ADDR RISCV_PTR " 1b"
+#define __BUG_ENTRY_FILE RISCV_PTR " %0"
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define __BUG_ENTRY \
+ __BUG_ENTRY_ADDR "\n\t" \
+ __BUG_ENTRY_FILE "\n\t" \
+ RISCV_SHORT " %1\n\t" \
+ RISCV_SHORT " %2"
+#else
+#define __BUG_ENTRY \
+ __BUG_ENTRY_ADDR "\n\t" \
+ RISCV_SHORT " %2"
+#endif
+
+#ifdef CONFIG_GENERIC_BUG
+#define __BUG_FLAGS(flags) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\n\t" \
+ "ebreak\n" \
+ ".pushsection __bug_table,\"aw\"\n\t" \
+ "2:\n\t" \
+ __BUG_ENTRY "\n\t" \
+ ".org 2b + %3\n\t" \
+ ".popsection" \
+ : \
+ : "i" (__FILE__), "i" (__LINE__), \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry))); \
+} while (0)
+#else /* CONFIG_GENERIC_BUG */
+#define __BUG_FLAGS(flags) do { \
+ __asm__ __volatile__ ("ebreak\n"); \
+} while (0)
+#endif /* CONFIG_GENERIC_BUG */
+
+#define BUG() do { \
+ __BUG_FLAGS(0); \
+ unreachable(); \
+} while (0)
+
+#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+struct pt_regs;
+struct task_struct;
+
+void __show_regs(struct pt_regs *regs);
+void die(struct pt_regs *regs, const char *str);
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr);
+
+#endif /* _ASM_RISCV_BUG_H */
diff --git a/riscv/include/asm/cache.h b/riscv/include/asm/cache.h
new file mode 100644
index 0000000..2174fe7
--- /dev/null
+++ b/riscv/include/asm/cache.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CACHE_H
+#define _ASM_RISCV_CACHE_H
+
+#define L1_CACHE_SHIFT 6
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_KMALLOC_MINALIGN (8)
+#endif
+
+/*
+ * RISC-V requires the stack pointer to be 16-byte aligned, so ensure that
+ * the flat loader aligns it accordingly.
+ */
+#ifndef CONFIG_MMU
+#define ARCH_SLAB_MINALIGN 16
+#endif
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
+extern int dma_cache_alignment;
+#define dma_get_cache_alignment dma_get_cache_alignment
+static inline int dma_get_cache_alignment(void)
+{
+ return dma_cache_alignment;
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CACHE_H */
diff --git a/riscv/include/asm/cacheflush.h b/riscv/include/asm/cacheflush.h
new file mode 100644
index 0000000..a129dac
--- /dev/null
+++ b/riscv/include/asm/cacheflush.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CACHEFLUSH_H
+#define _ASM_RISCV_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+static inline void local_flush_icache_all(void)
+{
+ asm volatile ("fence.i" ::: "memory");
+}
+
+#define PG_dcache_clean PG_arch_1
+
+static inline void flush_dcache_folio(struct folio *folio)
+{
+ if (test_bit(PG_dcache_clean, &folio->flags))
+ clear_bit(PG_dcache_clean, &folio->flags);
+}
+#define flush_dcache_folio flush_dcache_folio
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+static inline void flush_dcache_page(struct page *page)
+{
+ flush_dcache_folio(page_folio(page));
+}
+
+/*
+ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+ * so instead we just flush the whole thing.
+ */
+#define flush_icache_range(start, end) flush_icache_all()
+#define flush_icache_user_page(vma, pg, addr, len) \
+ flush_icache_mm(vma->vm_mm, 0)
+
+#ifdef CONFIG_64BIT
+#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end)
+#endif
+
+#ifndef CONFIG_SMP
+
+#define flush_icache_all() local_flush_icache_all()
+#define flush_icache_mm(mm, local) flush_icache_all()
+
+#else /* CONFIG_SMP */
+
+void flush_icache_all(void);
+void flush_icache_mm(struct mm_struct *mm, bool local);
+
+#endif /* CONFIG_SMP */
+
+extern unsigned int riscv_cbom_block_size;
+extern unsigned int riscv_cboz_block_size;
+void riscv_init_cbo_blocksizes(void);
+
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
+void riscv_noncoherent_supported(void);
+void __init riscv_set_dma_cache_alignment(void);
+#else
+static inline void riscv_noncoherent_supported(void) {}
+static inline void riscv_set_dma_cache_alignment(void) {}
+#endif
+
+/*
+ * Bits in sys_riscv_flush_icache()'s flags argument.
+ */
+#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
+#define SYS_RISCV_FLUSH_ICACHE_ALL (SYS_RISCV_FLUSH_ICACHE_LOCAL)
+
+#include <asm-generic/cacheflush.h>
+
+#endif /* _ASM_RISCV_CACHEFLUSH_H */
diff --git a/riscv/include/asm/cacheinfo.h b/riscv/include/asm/cacheinfo.h
new file mode 100644
index 0000000..d1a3652
--- /dev/null
+++ b/riscv/include/asm/cacheinfo.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef _ASM_RISCV_CACHEINFO_H
+#define _ASM_RISCV_CACHEINFO_H
+
+#include <linux/cacheinfo.h>
+
+struct riscv_cacheinfo_ops {
+ const struct attribute_group * (*get_priv_group)(struct cacheinfo
+ *this_leaf);
+};
+
+void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops);
+uintptr_t get_cache_size(u32 level, enum cache_type type);
+uintptr_t get_cache_geometry(u32 level, enum cache_type type);
+
+#endif /* _ASM_RISCV_CACHEINFO_H */
diff --git a/riscv/include/asm/cfi.h b/riscv/include/asm/cfi.h
new file mode 100644
index 0000000..56bf9d6
--- /dev/null
+++ b/riscv/include/asm/cfi.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_CFI_H
+#define _ASM_RISCV_CFI_H
+
+/*
+ * Clang Control Flow Integrity (CFI) support.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/cfi.h>
+
+#ifdef CONFIG_CFI_CLANG
+enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
+#else
+static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
+{
+ return BUG_TRAP_TYPE_NONE;
+}
+#endif /* CONFIG_CFI_CLANG */
+
+#endif /* _ASM_RISCV_CFI_H */
diff --git a/riscv/include/asm/clint.h b/riscv/include/asm/clint.h
new file mode 100644
index 0000000..0789fd3
--- /dev/null
+++ b/riscv/include/asm/clint.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef _ASM_RISCV_CLINT_H
+#define _ASM_RISCV_CLINT_H
+
+#include <linux/types.h>
+#include <asm/mmio.h>
+
+#ifdef CONFIG_RISCV_M_MODE
+/*
+ * This lives in the CLINT driver, but is accessed directly by timex.h to avoid
+ * any overhead when accessing the MMIO timer.
+ *
+ * The ISA defines mtime as a 64-bit memory-mapped register that increments at
+ * a constant frequency, but it doesn't define some other constraints we depend
+ * on (most notably ordering constraints, but also some simpler stuff like the
+ * memory layout). Thus, this is called "clint_time_val" instead of something
+ * like "riscv_mtime", to signify that these non-ISA assumptions must hold.
+ */
+extern u64 __iomem *clint_time_val;
+#endif
+
+#endif
diff --git a/riscv/include/asm/clocksource.h b/riscv/include/asm/clocksource.h
new file mode 100644
index 0000000..4821855
--- /dev/null
+++ b/riscv/include/asm/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_CLOCKSOURCE_H
+#define _ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif
diff --git a/riscv/include/asm/cmpxchg.h b/riscv/include/asm/cmpxchg.h
new file mode 100644
index 0000000..2f4726d
--- /dev/null
+++ b/riscv/include/asm/cmpxchg.h
@@ -0,0 +1,363 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CMPXCHG_H
+#define _ASM_RISCV_CMPXCHG_H
+
+#include <linux/bug.h>
+
+#include <asm/barrier.h>
+#include <asm/fence.h>
+
+#define __xchg_relaxed(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ " amoswap.w %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ " amoswap.d %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_xchg_relaxed(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
+ _x_, sizeof(*(ptr))); \
+})
+
+#define __xchg_acquire(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ " amoswap.w %0, %2, %1\n" \
+ RISCV_ACQUIRE_BARRIER \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ " amoswap.d %0, %2, %1\n" \
+ RISCV_ACQUIRE_BARRIER \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_xchg_acquire(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_acquire((ptr), \
+ _x_, sizeof(*(ptr))); \
+})
+
+#define __xchg_release(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ " amoswap.w %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ " amoswap.d %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_xchg_release(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_release((ptr), \
+ _x_, sizeof(*(ptr))); \
+})
+
+#define __arch_xchg(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ " amoswap.w.aqrl %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ " amoswap.d.aqrl %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_xchg(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr))); \
+})
+
+#define xchg32(ptr, x) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ arch_xchg((ptr), (x)); \
+})
+
+#define xchg64(ptr, x) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_xchg((ptr), (x)); \
+})
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#define __cmpxchg_relaxed(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_cmpxchg_relaxed(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define __cmpxchg_acquire(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ RISCV_ACQUIRE_BARRIER \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ RISCV_ACQUIRE_BARRIER \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_cmpxchg_acquire(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define __cmpxchg_release(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_cmpxchg_release(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_release((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define __cmpxchg(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w.rl %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d.rl %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define arch_cmpxchg_local(ptr, o, n) \
+ (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+
+#define arch_cmpxchg64(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg((ptr), (o), (n)); \
+})
+
+#define arch_cmpxchg64_local(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
+})
+
+#endif /* _ASM_RISCV_CMPXCHG_H */
diff --git a/riscv/include/asm/compat.h b/riscv/include/asm/compat.h
new file mode 100644
index 0000000..2ac955b
--- /dev/null
+++ b/riscv/include/asm/compat.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_COMPAT_H
+#define __ASM_COMPAT_H
+
+#define COMPAT_UTS_MACHINE "riscv\0\0"
+
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <asm-generic/compat.h>
+
+static inline int is_compat_task(void)
+{
+ return test_thread_flag(TIF_32BIT);
+}
+
+struct compat_user_regs_struct {
+ compat_ulong_t pc;
+ compat_ulong_t ra;
+ compat_ulong_t sp;
+ compat_ulong_t gp;
+ compat_ulong_t tp;
+ compat_ulong_t t0;
+ compat_ulong_t t1;
+ compat_ulong_t t2;
+ compat_ulong_t s0;
+ compat_ulong_t s1;
+ compat_ulong_t a0;
+ compat_ulong_t a1;
+ compat_ulong_t a2;
+ compat_ulong_t a3;
+ compat_ulong_t a4;
+ compat_ulong_t a5;
+ compat_ulong_t a6;
+ compat_ulong_t a7;
+ compat_ulong_t s2;
+ compat_ulong_t s3;
+ compat_ulong_t s4;
+ compat_ulong_t s5;
+ compat_ulong_t s6;
+ compat_ulong_t s7;
+ compat_ulong_t s8;
+ compat_ulong_t s9;
+ compat_ulong_t s10;
+ compat_ulong_t s11;
+ compat_ulong_t t3;
+ compat_ulong_t t4;
+ compat_ulong_t t5;
+ compat_ulong_t t6;
+};
+
+static inline void regs_to_cregs(struct compat_user_regs_struct *cregs,
+ struct pt_regs *regs)
+{
+ cregs->pc = (compat_ulong_t) regs->epc;
+ cregs->ra = (compat_ulong_t) regs->ra;
+ cregs->sp = (compat_ulong_t) regs->sp;
+ cregs->gp = (compat_ulong_t) regs->gp;
+ cregs->tp = (compat_ulong_t) regs->tp;
+ cregs->t0 = (compat_ulong_t) regs->t0;
+ cregs->t1 = (compat_ulong_t) regs->t1;
+ cregs->t2 = (compat_ulong_t) regs->t2;
+ cregs->s0 = (compat_ulong_t) regs->s0;
+ cregs->s1 = (compat_ulong_t) regs->s1;
+ cregs->a0 = (compat_ulong_t) regs->a0;
+ cregs->a1 = (compat_ulong_t) regs->a1;
+ cregs->a2 = (compat_ulong_t) regs->a2;
+ cregs->a3 = (compat_ulong_t) regs->a3;
+ cregs->a4 = (compat_ulong_t) regs->a4;
+ cregs->a5 = (compat_ulong_t) regs->a5;
+ cregs->a6 = (compat_ulong_t) regs->a6;
+ cregs->a7 = (compat_ulong_t) regs->a7;
+ cregs->s2 = (compat_ulong_t) regs->s2;
+ cregs->s3 = (compat_ulong_t) regs->s3;
+ cregs->s4 = (compat_ulong_t) regs->s4;
+ cregs->s5 = (compat_ulong_t) regs->s5;
+ cregs->s6 = (compat_ulong_t) regs->s6;
+ cregs->s7 = (compat_ulong_t) regs->s7;
+ cregs->s8 = (compat_ulong_t) regs->s8;
+ cregs->s9 = (compat_ulong_t) regs->s9;
+ cregs->s10 = (compat_ulong_t) regs->s10;
+ cregs->s11 = (compat_ulong_t) regs->s11;
+ cregs->t3 = (compat_ulong_t) regs->t3;
+ cregs->t4 = (compat_ulong_t) regs->t4;
+ cregs->t5 = (compat_ulong_t) regs->t5;
+ cregs->t6 = (compat_ulong_t) regs->t6;
+};
+
+static inline void cregs_to_regs(struct compat_user_regs_struct *cregs,
+ struct pt_regs *regs)
+{
+ regs->epc = (unsigned long) cregs->pc;
+ regs->ra = (unsigned long) cregs->ra;
+ regs->sp = (unsigned long) cregs->sp;
+ regs->gp = (unsigned long) cregs->gp;
+ regs->tp = (unsigned long) cregs->tp;
+ regs->t0 = (unsigned long) cregs->t0;
+ regs->t1 = (unsigned long) cregs->t1;
+ regs->t2 = (unsigned long) cregs->t2;
+ regs->s0 = (unsigned long) cregs->s0;
+ regs->s1 = (unsigned long) cregs->s1;
+ regs->a0 = (unsigned long) cregs->a0;
+ regs->a1 = (unsigned long) cregs->a1;
+ regs->a2 = (unsigned long) cregs->a2;
+ regs->a3 = (unsigned long) cregs->a3;
+ regs->a4 = (unsigned long) cregs->a4;
+ regs->a5 = (unsigned long) cregs->a5;
+ regs->a6 = (unsigned long) cregs->a6;
+ regs->a7 = (unsigned long) cregs->a7;
+ regs->s2 = (unsigned long) cregs->s2;
+ regs->s3 = (unsigned long) cregs->s3;
+ regs->s4 = (unsigned long) cregs->s4;
+ regs->s5 = (unsigned long) cregs->s5;
+ regs->s6 = (unsigned long) cregs->s6;
+ regs->s7 = (unsigned long) cregs->s7;
+ regs->s8 = (unsigned long) cregs->s8;
+ regs->s9 = (unsigned long) cregs->s9;
+ regs->s10 = (unsigned long) cregs->s10;
+ regs->s11 = (unsigned long) cregs->s11;
+ regs->t3 = (unsigned long) cregs->t3;
+ regs->t4 = (unsigned long) cregs->t4;
+ regs->t5 = (unsigned long) cregs->t5;
+ regs->t6 = (unsigned long) cregs->t6;
+};
+
+#endif /* __ASM_COMPAT_H */
diff --git a/riscv/include/asm/cpu.h b/riscv/include/asm/cpu.h
new file mode 100644
index 0000000..28d45a6
--- /dev/null
+++ b/riscv/include/asm/cpu.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_CPU_H
+#define _ASM_CPU_H
+
+/* This header is required unconditionally by the ACPI core */
+
+#endif /* _ASM_CPU_H */
diff --git a/riscv/include/asm/cpu_ops.h b/riscv/include/asm/cpu_ops.h
new file mode 100644
index 0000000..aa12846
--- /dev/null
+++ b/riscv/include/asm/cpu_ops.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ * Based on arch/arm64/include/asm/cpu_ops.h
+ */
+#ifndef __ASM_CPU_OPS_H
+#define __ASM_CPU_OPS_H
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/threads.h>
+
+/**
+ * struct cpu_operations - Callback operations for hotplugging CPUs.
+ *
+ * @name: Name of the boot protocol.
+ * @cpu_prepare: Early one-time preparation step for a cpu. If there
+ * is a mechanism for doing so, tests whether it is
+ * possible to boot the given HART.
+ * @cpu_start: Boots a cpu into the kernel.
+ * @cpu_disable: Prepares a cpu to die. May fail for some
+ * mechanism-specific reason, which will cause the hot
+ * unplug to be aborted. Called from the cpu to be killed.
+ * @cpu_stop: Makes a cpu leave the kernel. Must not fail. Called from
+ * the cpu being stopped.
+ * @cpu_is_stopped: Ensures a cpu has left the kernel. Called from another
+ * cpu.
+ */
+struct cpu_operations {
+ const char *name;
+ int (*cpu_prepare)(unsigned int cpu);
+ int (*cpu_start)(unsigned int cpu,
+ struct task_struct *tidle);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_disable)(unsigned int cpu);
+ void (*cpu_stop)(void);
+ int (*cpu_is_stopped)(unsigned int cpu);
+#endif
+};
+
+extern const struct cpu_operations cpu_ops_spinwait;
+extern const struct cpu_operations *cpu_ops[NR_CPUS];
+void __init cpu_set_ops(int cpu);
+
+#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/riscv/include/asm/cpu_ops_sbi.h b/riscv/include/asm/cpu_ops_sbi.h
new file mode 100644
index 0000000..d6e4665
--- /dev/null
+++ b/riscv/include/asm/cpu_ops_sbi.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 by Rivos Inc.
+ */
+#ifndef __ASM_CPU_OPS_SBI_H
+#define __ASM_CPU_OPS_SBI_H
+
+#ifndef __ASSEMBLY__
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/threads.h>
+
+extern const struct cpu_operations cpu_ops_sbi;
+
+/**
+ * struct sbi_hart_boot_data - Hart specific boot used during booting and
+ * cpu hotplug.
+ * @task_ptr: A pointer to the hart specific tp
+ * @stack_ptr: A pointer to the hart specific sp
+ */
+struct sbi_hart_boot_data {
+ void *task_ptr;
+ void *stack_ptr;
+};
+#endif
+
+#endif /* ifndef __ASM_CPU_OPS_SBI_H */
diff --git a/riscv/include/asm/cpufeature.h b/riscv/include/asm/cpufeature.h
new file mode 100644
index 0000000..a418c31
--- /dev/null
+++ b/riscv/include/asm/cpufeature.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022-2023 Rivos, Inc
+ */
+
+#ifndef _ASM_CPUFEATURE_H
+#define _ASM_CPUFEATURE_H
+
+#include <linux/bitmap.h>
+#include <linux/jump_label.h>
+#include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
+#include <asm/errno.h>
+
+/*
+ * These are probed via a device_initcall(), via either the SBI or directly
+ * from the corresponding CSRs.
+ */
+struct riscv_cpuinfo {
+ unsigned long mvendorid;
+ unsigned long marchid;
+ unsigned long mimpid;
+};
+
+struct riscv_isainfo {
+ DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);
+};
+
+DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
+
+DECLARE_PER_CPU(long, misaligned_access_speed);
+
+/* Per-cpu ISA extensions. */
+extern struct riscv_isainfo hart_isa[NR_CPUS];
+
+void riscv_user_isa_enable(void);
+
+#ifdef CONFIG_RISCV_MISALIGNED
+bool unaligned_ctl_available(void);
+bool check_unaligned_access_emulated(int cpu);
+void unaligned_emulation_finish(void);
+#else
+static inline bool unaligned_ctl_available(void)
+{
+ return false;
+}
+
+static inline bool check_unaligned_access_emulated(int cpu)
+{
+ return false;
+}
+
+static inline void unaligned_emulation_finish(void) {}
+#endif
+
+unsigned long riscv_get_elf_hwcap(void);
+
+struct riscv_isa_ext_data {
+ const unsigned int id;
+ const char *name;
+ const char *property;
+};
+
+extern const struct riscv_isa_ext_data riscv_isa_ext[];
+extern const size_t riscv_isa_ext_count;
+extern bool riscv_isa_fallback;
+
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+#define riscv_isa_extension_available(isa_bitmap, ext) \
+ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+
+static __always_inline bool
+riscv_has_extension_likely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+ "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ asm_volatile_goto(
+ ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
+ :
+ : [ext] "i" (ext)
+ :
+ : l_no);
+ } else {
+ if (!__riscv_isa_extension_available(NULL, ext))
+ goto l_no;
+ }
+
+ return true;
+l_no:
+ return false;
+}
+
+static __always_inline bool
+riscv_has_extension_unlikely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+ "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ asm_volatile_goto(
+ ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
+ :
+ : [ext] "i" (ext)
+ :
+ : l_yes);
+ } else {
+ if (__riscv_isa_extension_available(NULL, ext))
+ goto l_yes;
+ }
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext))
+ return true;
+
+ return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
+}
+
+static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext)
+{
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext))
+ return true;
+
+ return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
+}
+
+#endif
diff --git a/riscv/include/asm/cpuidle.h b/riscv/include/asm/cpuidle.h
new file mode 100644
index 0000000..71fdc60
--- /dev/null
+++ b/riscv/include/asm/cpuidle.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Allwinner Ltd
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_CPUIDLE_H
+#define _ASM_RISCV_CPUIDLE_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+static inline void cpu_do_idle(void)
+{
+ /*
+ * Add mb() here to ensure that all
+ * IO/MEM accesses are completed prior
+ * to entering WFI.
+ */
+ mb();
+ wait_for_interrupt();
+}
+
+#endif
diff --git a/riscv/include/asm/crash_core.h b/riscv/include/asm/crash_core.h
new file mode 100644
index 0000000..e1874b2
--- /dev/null
+++ b/riscv/include/asm/crash_core.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _RISCV_CRASH_CORE_H
+#define _RISCV_CRASH_CORE_H
+
+#define CRASH_ALIGN PMD_SIZE
+
+#define CRASH_ADDR_LOW_MAX dma32_phys_limit
+#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM()
+
+extern phys_addr_t memblock_end_of_DRAM(void);
+#endif
diff --git a/riscv/include/asm/csr.h b/riscv/include/asm/csr.h
new file mode 100644
index 0000000..0d18482
--- /dev/null
+++ b/riscv/include/asm/csr.h
@@ -0,0 +1,536 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CSR_H
+#define _ASM_RISCV_CSR_H
+
+#include <asm/asm.h>
+//#include <linux/bits.h>
+
+/* Status register flags */
+#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_MIE _AC(0x00000008, UL) /* Machine Interrupt Enable */
+#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_MPIE _AC(0x00000080, UL) /* Previous Machine IE */
+#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_MPP _AC(0x00001800, UL) /* Previously Machine */
+#define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */
+
+#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
+#define SR_FS_OFF _AC(0x00000000, UL)
+#define SR_FS_INITIAL _AC(0x00002000, UL)
+#define SR_FS_CLEAN _AC(0x00004000, UL)
+#define SR_FS_DIRTY _AC(0x00006000, UL)
+
+#define SR_VS _AC(0x00000600, UL) /* Vector Status */
+#define SR_VS_OFF _AC(0x00000000, UL)
+#define SR_VS_INITIAL _AC(0x00000200, UL)
+#define SR_VS_CLEAN _AC(0x00000400, UL)
+#define SR_VS_DIRTY _AC(0x00000600, UL)
+
+#define SR_XS _AC(0x00018000, UL) /* Extension Status */
+#define SR_XS_OFF _AC(0x00000000, UL)
+#define SR_XS_INITIAL _AC(0x00008000, UL)
+#define SR_XS_CLEAN _AC(0x00010000, UL)
+#define SR_XS_DIRTY _AC(0x00018000, UL)
+
+#define SR_FS_VS (SR_FS | SR_VS) /* Vector and Floating-Point Unit */
+
+#ifndef CONFIG_64BIT
+#define SR_SD _AC(0x80000000, UL) /* FS/VS/XS dirty */
+#else
+#define SR_SD _AC(0x8000000000000000, UL) /* FS/VS/XS dirty */
+#endif
+
+#ifdef CONFIG_64BIT
+#define SR_UXL _AC(0x300000000, UL) /* XLEN mask for U-mode */
+#define SR_UXL_32 _AC(0x100000000, UL) /* XLEN = 32 for U-mode */
+#define SR_UXL_64 _AC(0x200000000, UL) /* XLEN = 64 for U-mode */
+#endif
+
+/* SATP flags */
+#ifndef CONFIG_64BIT
+#define SATP_PPN _AC(0x003FFFFF, UL)
+#define SATP_MODE_32 _AC(0x80000000, UL)
+#define SATP_MODE_SHIFT 31
+#define SATP_ASID_BITS 9
+#define SATP_ASID_SHIFT 22
+#define SATP_ASID_MASK _AC(0x1FF, UL)
+#else
+#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
+#define SATP_MODE_39 _AC(0x8000000000000000, UL)
+#define SATP_MODE_48 _AC(0x9000000000000000, UL)
+#define SATP_MODE_57 _AC(0xa000000000000000, UL)
+#define SATP_MODE_SHIFT 60
+#define SATP_ASID_BITS 16
+#define SATP_ASID_SHIFT 44
+#define SATP_ASID_MASK _AC(0xFFFF, UL)
+#endif
+
+/* Exception cause high bit - is an interrupt if set */
+#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+
+/* Interrupt causes (minus the high bit) */
+#define IRQ_S_SOFT 1
+#define IRQ_VS_SOFT 2
+#define IRQ_M_SOFT 3
+#define IRQ_S_TIMER 5
+#define IRQ_VS_TIMER 6
+#define IRQ_M_TIMER 7
+#define IRQ_S_EXT 9
+#define IRQ_VS_EXT 10
+#define IRQ_M_EXT 11
+#define IRQ_S_GEXT 12
+#define IRQ_PMU_OVF 13
+#define IRQ_LOCAL_MAX (IRQ_PMU_OVF + 1)
+#define IRQ_LOCAL_MASK GENMASK((IRQ_LOCAL_MAX - 1), 0)
+
+/* Exception causes */
+#define EXC_INST_MISALIGNED 0
+#define EXC_INST_ACCESS 1
+#define EXC_INST_ILLEGAL 2
+#define EXC_BREAKPOINT 3
+#define EXC_LOAD_MISALIGNED 4
+#define EXC_LOAD_ACCESS 5
+#define EXC_STORE_MISALIGNED 6
+#define EXC_STORE_ACCESS 7
+#define EXC_SYSCALL 8
+#define EXC_HYPERVISOR_SYSCALL 9
+#define EXC_SUPERVISOR_SYSCALL 10
+#define EXC_INST_PAGE_FAULT 12
+#define EXC_LOAD_PAGE_FAULT 13
+#define EXC_STORE_PAGE_FAULT 15
+#define EXC_INST_GUEST_PAGE_FAULT 20
+#define EXC_LOAD_GUEST_PAGE_FAULT 21
+#define EXC_VIRTUAL_INST_FAULT 22
+#define EXC_STORE_GUEST_PAGE_FAULT 23
+
+/* PMP configuration */
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_A_TOR 0x08
+#define PMP_A_NA4 0x10
+#define PMP_A_NAPOT 0x18
+#define PMP_L 0x80
+
+/* HSTATUS flags */
+#ifdef CONFIG_64BIT
+#define HSTATUS_VSXL _AC(0x300000000, UL)
+#define HSTATUS_VSXL_SHIFT 32
+#endif
+#define HSTATUS_VTSR _AC(0x00400000, UL)
+#define HSTATUS_VTW _AC(0x00200000, UL)
+#define HSTATUS_VTVM _AC(0x00100000, UL)
+#define HSTATUS_VGEIN _AC(0x0003f000, UL)
+#define HSTATUS_VGEIN_SHIFT 12
+#define HSTATUS_HU _AC(0x00000200, UL)
+#define HSTATUS_SPVP _AC(0x00000100, UL)
+#define HSTATUS_SPV _AC(0x00000080, UL)
+#define HSTATUS_GVA _AC(0x00000040, UL)
+#define HSTATUS_VSBE _AC(0x00000020, UL)
+
+/* HGATP flags */
+#define HGATP_MODE_OFF _AC(0, UL)
+#define HGATP_MODE_SV32X4 _AC(1, UL)
+#define HGATP_MODE_SV39X4 _AC(8, UL)
+#define HGATP_MODE_SV48X4 _AC(9, UL)
+#define HGATP_MODE_SV57X4 _AC(10, UL)
+
+#define HGATP32_MODE_SHIFT 31
+#define HGATP32_VMID_SHIFT 22
+#define HGATP32_VMID GENMASK(28, 22)
+#define HGATP32_PPN GENMASK(21, 0)
+
+#define HGATP64_MODE_SHIFT 60
+#define HGATP64_VMID_SHIFT 44
+#define HGATP64_VMID GENMASK(57, 44)
+#define HGATP64_PPN GENMASK(43, 0)
+
+#define HGATP_PAGE_SHIFT 12
+
+#ifdef CONFIG_64BIT
+#define HGATP_PPN HGATP64_PPN
+#define HGATP_VMID_SHIFT HGATP64_VMID_SHIFT
+#define HGATP_VMID HGATP64_VMID
+#define HGATP_MODE_SHIFT HGATP64_MODE_SHIFT
+#else
+#define HGATP_PPN HGATP32_PPN
+#define HGATP_VMID_SHIFT HGATP32_VMID_SHIFT
+#define HGATP_VMID HGATP32_VMID
+#define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT
+#endif
+
+/* VSIP & HVIP relation */
+#define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT)
+#define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \
+ (_AC(1, UL) << IRQ_S_TIMER) | \
+ (_AC(1, UL) << IRQ_S_EXT))
+
+/* AIA CSR bits */
+#define TOPI_IID_SHIFT 16
+#define TOPI_IID_MASK GENMASK(11, 0)
+#define TOPI_IPRIO_MASK GENMASK(7, 0)
+#define TOPI_IPRIO_BITS 8
+
+#define TOPEI_ID_SHIFT 16
+#define TOPEI_ID_MASK GENMASK(10, 0)
+#define TOPEI_PRIO_MASK GENMASK(10, 0)
+
+#define ISELECT_IPRIO0 0x30
+#define ISELECT_IPRIO15 0x3f
+#define ISELECT_MASK GENMASK(8, 0)
+
+#define HVICTL_VTI BIT(30)
+#define HVICTL_IID GENMASK(27, 16)
+#define HVICTL_IID_SHIFT 16
+#define HVICTL_DPR BIT(9)
+#define HVICTL_IPRIOM BIT(8)
+#define HVICTL_IPRIO GENMASK(7, 0)
+
+/* xENVCFG flags */
+#define ENVCFG_STCE (_AC(1, ULL) << 63)
+#define ENVCFG_PBMTE (_AC(1, ULL) << 62)
+#define ENVCFG_CBZE (_AC(1, UL) << 7)
+#define ENVCFG_CBCFE (_AC(1, UL) << 6)
+#define ENVCFG_CBIE_SHIFT 4
+#define ENVCFG_CBIE (_AC(0x3, UL) << ENVCFG_CBIE_SHIFT)
+#define ENVCFG_CBIE_ILL _AC(0x0, UL)
+#define ENVCFG_CBIE_FLUSH _AC(0x1, UL)
+#define ENVCFG_CBIE_INV _AC(0x3, UL)
+#define ENVCFG_FIOM _AC(0x1, UL)
+
+/* Smstateen bits */
+#define SMSTATEEN0_AIA_IMSIC_SHIFT 58
+#define SMSTATEEN0_AIA_IMSIC (_ULL(1) << SMSTATEEN0_AIA_IMSIC_SHIFT)
+#define SMSTATEEN0_AIA_SHIFT 59
+#define SMSTATEEN0_AIA (_ULL(1) << SMSTATEEN0_AIA_SHIFT)
+#define SMSTATEEN0_AIA_ISEL_SHIFT 60
+#define SMSTATEEN0_AIA_ISEL (_ULL(1) << SMSTATEEN0_AIA_ISEL_SHIFT)
+#define SMSTATEEN0_HSENVCFG_SHIFT 62
+#define SMSTATEEN0_HSENVCFG (_ULL(1) << SMSTATEEN0_HSENVCFG_SHIFT)
+#define SMSTATEEN0_SSTATEEN0_SHIFT 63
+#define SMSTATEEN0_SSTATEEN0 (_ULL(1) << SMSTATEEN0_SSTATEEN0_SHIFT)
+
+/* symbolic CSR names: */
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_HPMCOUNTER3 0xc03
+#define CSR_HPMCOUNTER4 0xc04
+#define CSR_HPMCOUNTER5 0xc05
+#define CSR_HPMCOUNTER6 0xc06
+#define CSR_HPMCOUNTER7 0xc07
+#define CSR_HPMCOUNTER8 0xc08
+#define CSR_HPMCOUNTER9 0xc09
+#define CSR_HPMCOUNTER10 0xc0a
+#define CSR_HPMCOUNTER11 0xc0b
+#define CSR_HPMCOUNTER12 0xc0c
+#define CSR_HPMCOUNTER13 0xc0d
+#define CSR_HPMCOUNTER14 0xc0e
+#define CSR_HPMCOUNTER15 0xc0f
+#define CSR_HPMCOUNTER16 0xc10
+#define CSR_HPMCOUNTER17 0xc11
+#define CSR_HPMCOUNTER18 0xc12
+#define CSR_HPMCOUNTER19 0xc13
+#define CSR_HPMCOUNTER20 0xc14
+#define CSR_HPMCOUNTER21 0xc15
+#define CSR_HPMCOUNTER22 0xc16
+#define CSR_HPMCOUNTER23 0xc17
+#define CSR_HPMCOUNTER24 0xc18
+#define CSR_HPMCOUNTER25 0xc19
+#define CSR_HPMCOUNTER26 0xc1a
+#define CSR_HPMCOUNTER27 0xc1b
+#define CSR_HPMCOUNTER28 0xc1c
+#define CSR_HPMCOUNTER29 0xc1d
+#define CSR_HPMCOUNTER30 0xc1e
+#define CSR_HPMCOUNTER31 0xc1f
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
+#define CSR_HPMCOUNTER3H 0xc83
+#define CSR_HPMCOUNTER4H 0xc84
+#define CSR_HPMCOUNTER5H 0xc85
+#define CSR_HPMCOUNTER6H 0xc86
+#define CSR_HPMCOUNTER7H 0xc87
+#define CSR_HPMCOUNTER8H 0xc88
+#define CSR_HPMCOUNTER9H 0xc89
+#define CSR_HPMCOUNTER10H 0xc8a
+#define CSR_HPMCOUNTER11H 0xc8b
+#define CSR_HPMCOUNTER12H 0xc8c
+#define CSR_HPMCOUNTER13H 0xc8d
+#define CSR_HPMCOUNTER14H 0xc8e
+#define CSR_HPMCOUNTER15H 0xc8f
+#define CSR_HPMCOUNTER16H 0xc90
+#define CSR_HPMCOUNTER17H 0xc91
+#define CSR_HPMCOUNTER18H 0xc92
+#define CSR_HPMCOUNTER19H 0xc93
+#define CSR_HPMCOUNTER20H 0xc94
+#define CSR_HPMCOUNTER21H 0xc95
+#define CSR_HPMCOUNTER22H 0xc96
+#define CSR_HPMCOUNTER23H 0xc97
+#define CSR_HPMCOUNTER24H 0xc98
+#define CSR_HPMCOUNTER25H 0xc99
+#define CSR_HPMCOUNTER26H 0xc9a
+#define CSR_HPMCOUNTER27H 0xc9b
+#define CSR_HPMCOUNTER28H 0xc9c
+#define CSR_HPMCOUNTER29H 0xc9d
+#define CSR_HPMCOUNTER30H 0xc9e
+#define CSR_HPMCOUNTER31H 0xc9f
+
+#define CSR_SSCOUNTOVF 0xda0
+
+#define CSR_SSTATUS 0x100
+#define CSR_SIE 0x104
+#define CSR_STVEC 0x105
+#define CSR_SCOUNTEREN 0x106
+#define CSR_SENVCFG 0x10a
+#define CSR_SSTATEEN0 0x10c
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SCAUSE 0x142
+#define CSR_STVAL 0x143
+#define CSR_SIP 0x144
+#define CSR_SATP 0x180
+
+#define CSR_STIMECMP 0x14D
+#define CSR_STIMECMPH 0x15D
+
+/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
+#define CSR_SISELECT 0x150
+#define CSR_SIREG 0x151
+
+/* Supervisor-Level Interrupts (AIA) */
+#define CSR_STOPEI 0x15c
+#define CSR_STOPI 0xdb0
+
+/* Supervisor-Level High-Half CSRs (AIA) */
+#define CSR_SIEH 0x114
+#define CSR_SIPH 0x154
+
+#define CSR_VSSTATUS 0x200
+#define CSR_VSIE 0x204
+#define CSR_VSTVEC 0x205
+#define CSR_VSSCRATCH 0x240
+#define CSR_VSEPC 0x241
+#define CSR_VSCAUSE 0x242
+#define CSR_VSTVAL 0x243
+#define CSR_VSIP 0x244
+#define CSR_VSATP 0x280
+#define CSR_VSTIMECMP 0x24D
+#define CSR_VSTIMECMPH 0x25D
+
+#define CSR_HSTATUS 0x600
+#define CSR_HEDELEG 0x602
+#define CSR_HIDELEG 0x603
+#define CSR_HIE 0x604
+#define CSR_HTIMEDELTA 0x605
+#define CSR_HCOUNTEREN 0x606
+#define CSR_HGEIE 0x607
+#define CSR_HENVCFG 0x60a
+#define CSR_HTIMEDELTAH 0x615
+#define CSR_HENVCFGH 0x61a
+#define CSR_HTVAL 0x643
+#define CSR_HIP 0x644
+#define CSR_HVIP 0x645
+#define CSR_HTINST 0x64a
+#define CSR_HGATP 0x680
+#define CSR_HGEIP 0xe12
+
+/* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
+#define CSR_HVIEN 0x608
+#define CSR_HVICTL 0x609
+#define CSR_HVIPRIO1 0x646
+#define CSR_HVIPRIO2 0x647
+
+/* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */
+#define CSR_VSISELECT 0x250
+#define CSR_VSIREG 0x251
+
+/* VS-Level Interrupts (H-extension with AIA) */
+#define CSR_VSTOPEI 0x25c
+#define CSR_VSTOPI 0xeb0
+
+/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
+#define CSR_HIDELEGH 0x613
+#define CSR_HVIENH 0x618
+#define CSR_HVIPH 0x655
+#define CSR_HVIPRIO1H 0x656
+#define CSR_HVIPRIO2H 0x657
+#define CSR_VSIEH 0x214
+#define CSR_VSIPH 0x254
+
+/* Hypervisor stateen CSRs */
+#define CSR_HSTATEEN0 0x60c
+#define CSR_HSTATEEN0H 0x61c
+
+#define CSR_MSTATUS 0x300
+#define CSR_MISA 0x301
+#define CSR_MIDELEG 0x303
+#define CSR_MIE 0x304
+#define CSR_MTVEC 0x305
+#define CSR_MENVCFG 0x30a
+#define CSR_MENVCFGH 0x31a
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MTVAL 0x343
+#define CSR_MIP 0x344
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPADDR0 0x3b0
+#define CSR_MVENDORID 0xf11
+#define CSR_MARCHID 0xf12
+#define CSR_MIMPID 0xf13
+#define CSR_MHARTID 0xf14
+
+/* Machine-Level Window to Indirectly Accessed Registers (AIA) */
+#define CSR_MISELECT 0x350
+#define CSR_MIREG 0x351
+
+/* Machine-Level Interrupts (AIA) */
+#define CSR_MTOPEI 0x35c
+#define CSR_MTOPI 0xfb0
+
+/* Virtual Interrupts for Supervisor Level (AIA) */
+#define CSR_MVIEN 0x308
+#define CSR_MVIP 0x309
+
+/* Machine-Level High-Half CSRs (AIA) */
+#define CSR_MIDELEGH 0x313
+#define CSR_MIEH 0x314
+#define CSR_MVIENH 0x318
+#define CSR_MVIPH 0x319
+#define CSR_MIPH 0x354
+
+#define CSR_VSTART 0x8
+#define CSR_VCSR 0xf
+#define CSR_VL 0xc20
+#define CSR_VTYPE 0xc21
+#define CSR_VLENB 0xc22
+
+#ifdef CONFIG_RISCV_M_MODE
+# define CSR_STATUS CSR_MSTATUS
+# define CSR_IE CSR_MIE
+# define CSR_TVEC CSR_MTVEC
+# define CSR_SCRATCH CSR_MSCRATCH
+# define CSR_EPC CSR_MEPC
+# define CSR_CAUSE CSR_MCAUSE
+# define CSR_TVAL CSR_MTVAL
+# define CSR_IP CSR_MIP
+
+# define CSR_IEH CSR_MIEH
+# define CSR_ISELECT CSR_MISELECT
+# define CSR_IREG CSR_MIREG
+# define CSR_IPH CSR_MIPH
+# define CSR_TOPEI CSR_MTOPEI
+# define CSR_TOPI CSR_MTOPI
+
+# define SR_IE SR_MIE
+# define SR_PIE SR_MPIE
+# define SR_PP SR_MPP
+
+# define RV_IRQ_SOFT IRQ_M_SOFT
+# define RV_IRQ_TIMER IRQ_M_TIMER
+# define RV_IRQ_EXT IRQ_M_EXT
+#else /* CONFIG_RISCV_M_MODE */
+# define CSR_STATUS CSR_SSTATUS
+# define CSR_IE CSR_SIE
+# define CSR_TVEC CSR_STVEC
+# define CSR_SCRATCH CSR_SSCRATCH
+# define CSR_EPC CSR_SEPC
+# define CSR_CAUSE CSR_SCAUSE
+# define CSR_TVAL CSR_STVAL
+# define CSR_IP CSR_SIP
+
+# define CSR_IEH CSR_SIEH
+# define CSR_ISELECT CSR_SISELECT
+# define CSR_IREG CSR_SIREG
+# define CSR_IPH CSR_SIPH
+# define CSR_TOPEI CSR_STOPEI
+# define CSR_TOPI CSR_STOPI
+
+# define SR_IE SR_SIE
+# define SR_PIE SR_SPIE
+# define SR_PP SR_SPP
+
+# define RV_IRQ_SOFT IRQ_S_SOFT
+# define RV_IRQ_TIMER IRQ_S_TIMER
+# define RV_IRQ_EXT IRQ_S_EXT
+# define RV_IRQ_PMU IRQ_PMU_OVF
+# define SIP_LCOFIP (_AC(0x1, UL) << IRQ_PMU_OVF)
+
+#endif /* !CONFIG_RISCV_M_MODE */
+
+/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
+#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT)
+#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER)
+#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT)
+
+#ifndef __ASSEMBLY__
+
+#define csr_swap(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr) \
+ : "=r" (__v) : \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_write(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#define csr_read_set(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_set(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#define csr_read_clear(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_clear(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CSR_H */
diff --git a/riscv/include/asm/csr.h~ b/riscv/include/asm/csr.h~
new file mode 100644
index 0000000..306a19a
--- /dev/null
+++ b/riscv/include/asm/csr.h~
@@ -0,0 +1,536 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CSR_H
+#define _ASM_RISCV_CSR_H
+
+#include <asm/asm.h>
+#include <linux/bits.h>
+
+/* Status register flags */
+#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_MIE _AC(0x00000008, UL) /* Machine Interrupt Enable */
+#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_MPIE _AC(0x00000080, UL) /* Previous Machine IE */
+#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_MPP _AC(0x00001800, UL) /* Previously Machine */
+#define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */
+
+#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
+#define SR_FS_OFF _AC(0x00000000, UL)
+#define SR_FS_INITIAL _AC(0x00002000, UL)
+#define SR_FS_CLEAN _AC(0x00004000, UL)
+#define SR_FS_DIRTY _AC(0x00006000, UL)
+
+#define SR_VS _AC(0x00000600, UL) /* Vector Status */
+#define SR_VS_OFF _AC(0x00000000, UL)
+#define SR_VS_INITIAL _AC(0x00000200, UL)
+#define SR_VS_CLEAN _AC(0x00000400, UL)
+#define SR_VS_DIRTY _AC(0x00000600, UL)
+
+#define SR_XS _AC(0x00018000, UL) /* Extension Status */
+#define SR_XS_OFF _AC(0x00000000, UL)
+#define SR_XS_INITIAL _AC(0x00008000, UL)
+#define SR_XS_CLEAN _AC(0x00010000, UL)
+#define SR_XS_DIRTY _AC(0x00018000, UL)
+
+#define SR_FS_VS (SR_FS | SR_VS) /* Vector and Floating-Point Unit */
+
+#ifndef CONFIG_64BIT
+#define SR_SD _AC(0x80000000, UL) /* FS/VS/XS dirty */
+#else
+#define SR_SD _AC(0x8000000000000000, UL) /* FS/VS/XS dirty */
+#endif
+
+#ifdef CONFIG_64BIT
+#define SR_UXL _AC(0x300000000, UL) /* XLEN mask for U-mode */
+#define SR_UXL_32 _AC(0x100000000, UL) /* XLEN = 32 for U-mode */
+#define SR_UXL_64 _AC(0x200000000, UL) /* XLEN = 64 for U-mode */
+#endif
+
+/* SATP flags */
+#ifndef CONFIG_64BIT
+#define SATP_PPN _AC(0x003FFFFF, UL)
+#define SATP_MODE_32 _AC(0x80000000, UL)
+#define SATP_MODE_SHIFT 31
+#define SATP_ASID_BITS 9
+#define SATP_ASID_SHIFT 22
+#define SATP_ASID_MASK _AC(0x1FF, UL)
+#else
+#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
+#define SATP_MODE_39 _AC(0x8000000000000000, UL)
+#define SATP_MODE_48 _AC(0x9000000000000000, UL)
+#define SATP_MODE_57 _AC(0xa000000000000000, UL)
+#define SATP_MODE_SHIFT 60
+#define SATP_ASID_BITS 16
+#define SATP_ASID_SHIFT 44
+#define SATP_ASID_MASK _AC(0xFFFF, UL)
+#endif
+
+/* Exception cause high bit - is an interrupt if set */
+#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+
+/* Interrupt causes (minus the high bit) */
+#define IRQ_S_SOFT 1
+#define IRQ_VS_SOFT 2
+#define IRQ_M_SOFT 3
+#define IRQ_S_TIMER 5
+#define IRQ_VS_TIMER 6
+#define IRQ_M_TIMER 7
+#define IRQ_S_EXT 9
+#define IRQ_VS_EXT 10
+#define IRQ_M_EXT 11
+#define IRQ_S_GEXT 12
+#define IRQ_PMU_OVF 13
+#define IRQ_LOCAL_MAX (IRQ_PMU_OVF + 1)
+#define IRQ_LOCAL_MASK GENMASK((IRQ_LOCAL_MAX - 1), 0)
+
+/* Exception causes */
+#define EXC_INST_MISALIGNED 0
+#define EXC_INST_ACCESS 1
+#define EXC_INST_ILLEGAL 2
+#define EXC_BREAKPOINT 3
+#define EXC_LOAD_MISALIGNED 4
+#define EXC_LOAD_ACCESS 5
+#define EXC_STORE_MISALIGNED 6
+#define EXC_STORE_ACCESS 7
+#define EXC_SYSCALL 8
+#define EXC_HYPERVISOR_SYSCALL 9
+#define EXC_SUPERVISOR_SYSCALL 10
+#define EXC_INST_PAGE_FAULT 12
+#define EXC_LOAD_PAGE_FAULT 13
+#define EXC_STORE_PAGE_FAULT 15
+#define EXC_INST_GUEST_PAGE_FAULT 20
+#define EXC_LOAD_GUEST_PAGE_FAULT 21
+#define EXC_VIRTUAL_INST_FAULT 22
+#define EXC_STORE_GUEST_PAGE_FAULT 23
+
+/* PMP configuration */
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_A_TOR 0x08
+#define PMP_A_NA4 0x10
+#define PMP_A_NAPOT 0x18
+#define PMP_L 0x80
+
+/* HSTATUS flags */
+#ifdef CONFIG_64BIT
+#define HSTATUS_VSXL _AC(0x300000000, UL)
+#define HSTATUS_VSXL_SHIFT 32
+#endif
+#define HSTATUS_VTSR _AC(0x00400000, UL)
+#define HSTATUS_VTW _AC(0x00200000, UL)
+#define HSTATUS_VTVM _AC(0x00100000, UL)
+#define HSTATUS_VGEIN _AC(0x0003f000, UL)
+#define HSTATUS_VGEIN_SHIFT 12
+#define HSTATUS_HU _AC(0x00000200, UL)
+#define HSTATUS_SPVP _AC(0x00000100, UL)
+#define HSTATUS_SPV _AC(0x00000080, UL)
+#define HSTATUS_GVA _AC(0x00000040, UL)
+#define HSTATUS_VSBE _AC(0x00000020, UL)
+
+/* HGATP flags */
+#define HGATP_MODE_OFF _AC(0, UL)
+#define HGATP_MODE_SV32X4 _AC(1, UL)
+#define HGATP_MODE_SV39X4 _AC(8, UL)
+#define HGATP_MODE_SV48X4 _AC(9, UL)
+#define HGATP_MODE_SV57X4 _AC(10, UL)
+
+#define HGATP32_MODE_SHIFT 31
+#define HGATP32_VMID_SHIFT 22
+#define HGATP32_VMID GENMASK(28, 22)
+#define HGATP32_PPN GENMASK(21, 0)
+
+#define HGATP64_MODE_SHIFT 60
+#define HGATP64_VMID_SHIFT 44
+#define HGATP64_VMID GENMASK(57, 44)
+#define HGATP64_PPN GENMASK(43, 0)
+
+#define HGATP_PAGE_SHIFT 12
+
+#ifdef CONFIG_64BIT
+#define HGATP_PPN HGATP64_PPN
+#define HGATP_VMID_SHIFT HGATP64_VMID_SHIFT
+#define HGATP_VMID HGATP64_VMID
+#define HGATP_MODE_SHIFT HGATP64_MODE_SHIFT
+#else
+#define HGATP_PPN HGATP32_PPN
+#define HGATP_VMID_SHIFT HGATP32_VMID_SHIFT
+#define HGATP_VMID HGATP32_VMID
+#define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT
+#endif
+
+/* VSIP & HVIP relation */
+#define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT)
+#define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \
+ (_AC(1, UL) << IRQ_S_TIMER) | \
+ (_AC(1, UL) << IRQ_S_EXT))
+
+/* AIA CSR bits */
+#define TOPI_IID_SHIFT 16
+#define TOPI_IID_MASK GENMASK(11, 0)
+#define TOPI_IPRIO_MASK GENMASK(7, 0)
+#define TOPI_IPRIO_BITS 8
+
+#define TOPEI_ID_SHIFT 16
+#define TOPEI_ID_MASK GENMASK(10, 0)
+#define TOPEI_PRIO_MASK GENMASK(10, 0)
+
+#define ISELECT_IPRIO0 0x30
+#define ISELECT_IPRIO15 0x3f
+#define ISELECT_MASK GENMASK(8, 0)
+
+#define HVICTL_VTI BIT(30)
+#define HVICTL_IID GENMASK(27, 16)
+#define HVICTL_IID_SHIFT 16
+#define HVICTL_DPR BIT(9)
+#define HVICTL_IPRIOM BIT(8)
+#define HVICTL_IPRIO GENMASK(7, 0)
+
+/* xENVCFG flags */
+#define ENVCFG_STCE (_AC(1, ULL) << 63)
+#define ENVCFG_PBMTE (_AC(1, ULL) << 62)
+#define ENVCFG_CBZE (_AC(1, UL) << 7)
+#define ENVCFG_CBCFE (_AC(1, UL) << 6)
+#define ENVCFG_CBIE_SHIFT 4
+#define ENVCFG_CBIE (_AC(0x3, UL) << ENVCFG_CBIE_SHIFT)
+#define ENVCFG_CBIE_ILL _AC(0x0, UL)
+#define ENVCFG_CBIE_FLUSH _AC(0x1, UL)
+#define ENVCFG_CBIE_INV _AC(0x3, UL)
+#define ENVCFG_FIOM _AC(0x1, UL)
+
+/* Smstateen bits */
+#define SMSTATEEN0_AIA_IMSIC_SHIFT 58
+#define SMSTATEEN0_AIA_IMSIC (_ULL(1) << SMSTATEEN0_AIA_IMSIC_SHIFT)
+#define SMSTATEEN0_AIA_SHIFT 59
+#define SMSTATEEN0_AIA (_ULL(1) << SMSTATEEN0_AIA_SHIFT)
+#define SMSTATEEN0_AIA_ISEL_SHIFT 60
+#define SMSTATEEN0_AIA_ISEL (_ULL(1) << SMSTATEEN0_AIA_ISEL_SHIFT)
+#define SMSTATEEN0_HSENVCFG_SHIFT 62
+#define SMSTATEEN0_HSENVCFG (_ULL(1) << SMSTATEEN0_HSENVCFG_SHIFT)
+#define SMSTATEEN0_SSTATEEN0_SHIFT 63
+#define SMSTATEEN0_SSTATEEN0 (_ULL(1) << SMSTATEEN0_SSTATEEN0_SHIFT)
+
+/* symbolic CSR names: */
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_HPMCOUNTER3 0xc03
+#define CSR_HPMCOUNTER4 0xc04
+#define CSR_HPMCOUNTER5 0xc05
+#define CSR_HPMCOUNTER6 0xc06
+#define CSR_HPMCOUNTER7 0xc07
+#define CSR_HPMCOUNTER8 0xc08
+#define CSR_HPMCOUNTER9 0xc09
+#define CSR_HPMCOUNTER10 0xc0a
+#define CSR_HPMCOUNTER11 0xc0b
+#define CSR_HPMCOUNTER12 0xc0c
+#define CSR_HPMCOUNTER13 0xc0d
+#define CSR_HPMCOUNTER14 0xc0e
+#define CSR_HPMCOUNTER15 0xc0f
+#define CSR_HPMCOUNTER16 0xc10
+#define CSR_HPMCOUNTER17 0xc11
+#define CSR_HPMCOUNTER18 0xc12
+#define CSR_HPMCOUNTER19 0xc13
+#define CSR_HPMCOUNTER20 0xc14
+#define CSR_HPMCOUNTER21 0xc15
+#define CSR_HPMCOUNTER22 0xc16
+#define CSR_HPMCOUNTER23 0xc17
+#define CSR_HPMCOUNTER24 0xc18
+#define CSR_HPMCOUNTER25 0xc19
+#define CSR_HPMCOUNTER26 0xc1a
+#define CSR_HPMCOUNTER27 0xc1b
+#define CSR_HPMCOUNTER28 0xc1c
+#define CSR_HPMCOUNTER29 0xc1d
+#define CSR_HPMCOUNTER30 0xc1e
+#define CSR_HPMCOUNTER31 0xc1f
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
+#define CSR_HPMCOUNTER3H 0xc83
+#define CSR_HPMCOUNTER4H 0xc84
+#define CSR_HPMCOUNTER5H 0xc85
+#define CSR_HPMCOUNTER6H 0xc86
+#define CSR_HPMCOUNTER7H 0xc87
+#define CSR_HPMCOUNTER8H 0xc88
+#define CSR_HPMCOUNTER9H 0xc89
+#define CSR_HPMCOUNTER10H 0xc8a
+#define CSR_HPMCOUNTER11H 0xc8b
+#define CSR_HPMCOUNTER12H 0xc8c
+#define CSR_HPMCOUNTER13H 0xc8d
+#define CSR_HPMCOUNTER14H 0xc8e
+#define CSR_HPMCOUNTER15H 0xc8f
+#define CSR_HPMCOUNTER16H 0xc90
+#define CSR_HPMCOUNTER17H 0xc91
+#define CSR_HPMCOUNTER18H 0xc92
+#define CSR_HPMCOUNTER19H 0xc93
+#define CSR_HPMCOUNTER20H 0xc94
+#define CSR_HPMCOUNTER21H 0xc95
+#define CSR_HPMCOUNTER22H 0xc96
+#define CSR_HPMCOUNTER23H 0xc97
+#define CSR_HPMCOUNTER24H 0xc98
+#define CSR_HPMCOUNTER25H 0xc99
+#define CSR_HPMCOUNTER26H 0xc9a
+#define CSR_HPMCOUNTER27H 0xc9b
+#define CSR_HPMCOUNTER28H 0xc9c
+#define CSR_HPMCOUNTER29H 0xc9d
+#define CSR_HPMCOUNTER30H 0xc9e
+#define CSR_HPMCOUNTER31H 0xc9f
+
+#define CSR_SSCOUNTOVF 0xda0
+
+#define CSR_SSTATUS 0x100
+#define CSR_SIE 0x104
+#define CSR_STVEC 0x105
+#define CSR_SCOUNTEREN 0x106
+#define CSR_SENVCFG 0x10a
+#define CSR_SSTATEEN0 0x10c
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SCAUSE 0x142
+#define CSR_STVAL 0x143
+#define CSR_SIP 0x144
+#define CSR_SATP 0x180
+
+#define CSR_STIMECMP 0x14D
+#define CSR_STIMECMPH 0x15D
+
+/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
+#define CSR_SISELECT 0x150
+#define CSR_SIREG 0x151
+
+/* Supervisor-Level Interrupts (AIA) */
+#define CSR_STOPEI 0x15c
+#define CSR_STOPI 0xdb0
+
+/* Supervisor-Level High-Half CSRs (AIA) */
+#define CSR_SIEH 0x114
+#define CSR_SIPH 0x154
+
+#define CSR_VSSTATUS 0x200
+#define CSR_VSIE 0x204
+#define CSR_VSTVEC 0x205
+#define CSR_VSSCRATCH 0x240
+#define CSR_VSEPC 0x241
+#define CSR_VSCAUSE 0x242
+#define CSR_VSTVAL 0x243
+#define CSR_VSIP 0x244
+#define CSR_VSATP 0x280
+#define CSR_VSTIMECMP 0x24D
+#define CSR_VSTIMECMPH 0x25D
+
+#define CSR_HSTATUS 0x600
+#define CSR_HEDELEG 0x602
+#define CSR_HIDELEG 0x603
+#define CSR_HIE 0x604
+#define CSR_HTIMEDELTA 0x605
+#define CSR_HCOUNTEREN 0x606
+#define CSR_HGEIE 0x607
+#define CSR_HENVCFG 0x60a
+#define CSR_HTIMEDELTAH 0x615
+#define CSR_HENVCFGH 0x61a
+#define CSR_HTVAL 0x643
+#define CSR_HIP 0x644
+#define CSR_HVIP 0x645
+#define CSR_HTINST 0x64a
+#define CSR_HGATP 0x680
+#define CSR_HGEIP 0xe12
+
+/* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
+#define CSR_HVIEN 0x608
+#define CSR_HVICTL 0x609
+#define CSR_HVIPRIO1 0x646
+#define CSR_HVIPRIO2 0x647
+
+/* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */
+#define CSR_VSISELECT 0x250
+#define CSR_VSIREG 0x251
+
+/* VS-Level Interrupts (H-extension with AIA) */
+#define CSR_VSTOPEI 0x25c
+#define CSR_VSTOPI 0xeb0
+
+/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
+#define CSR_HIDELEGH 0x613
+#define CSR_HVIENH 0x618
+#define CSR_HVIPH 0x655
+#define CSR_HVIPRIO1H 0x656
+#define CSR_HVIPRIO2H 0x657
+#define CSR_VSIEH 0x214
+#define CSR_VSIPH 0x254
+
+/* Hypervisor stateen CSRs */
+#define CSR_HSTATEEN0 0x60c
+#define CSR_HSTATEEN0H 0x61c
+
+#define CSR_MSTATUS 0x300
+#define CSR_MISA 0x301
+#define CSR_MIDELEG 0x303
+#define CSR_MIE 0x304
+#define CSR_MTVEC 0x305
+#define CSR_MENVCFG 0x30a
+#define CSR_MENVCFGH 0x31a
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MTVAL 0x343
+#define CSR_MIP 0x344
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPADDR0 0x3b0
+#define CSR_MVENDORID 0xf11
+#define CSR_MARCHID 0xf12
+#define CSR_MIMPID 0xf13
+#define CSR_MHARTID 0xf14
+
+/* Machine-Level Window to Indirectly Accessed Registers (AIA) */
+#define CSR_MISELECT 0x350
+#define CSR_MIREG 0x351
+
+/* Machine-Level Interrupts (AIA) */
+#define CSR_MTOPEI 0x35c
+#define CSR_MTOPI 0xfb0
+
+/* Virtual Interrupts for Supervisor Level (AIA) */
+#define CSR_MVIEN 0x308
+#define CSR_MVIP 0x309
+
+/* Machine-Level High-Half CSRs (AIA) */
+#define CSR_MIDELEGH 0x313
+#define CSR_MIEH 0x314
+#define CSR_MVIENH 0x318
+#define CSR_MVIPH 0x319
+#define CSR_MIPH 0x354
+
+#define CSR_VSTART 0x8
+#define CSR_VCSR 0xf
+#define CSR_VL 0xc20
+#define CSR_VTYPE 0xc21
+#define CSR_VLENB 0xc22
+
+#ifdef CONFIG_RISCV_M_MODE
+# define CSR_STATUS CSR_MSTATUS
+# define CSR_IE CSR_MIE
+# define CSR_TVEC CSR_MTVEC
+# define CSR_SCRATCH CSR_MSCRATCH
+# define CSR_EPC CSR_MEPC
+# define CSR_CAUSE CSR_MCAUSE
+# define CSR_TVAL CSR_MTVAL
+# define CSR_IP CSR_MIP
+
+# define CSR_IEH CSR_MIEH
+# define CSR_ISELECT CSR_MISELECT
+# define CSR_IREG CSR_MIREG
+# define CSR_IPH CSR_MIPH
+# define CSR_TOPEI CSR_MTOPEI
+# define CSR_TOPI CSR_MTOPI
+
+# define SR_IE SR_MIE
+# define SR_PIE SR_MPIE
+# define SR_PP SR_MPP
+
+# define RV_IRQ_SOFT IRQ_M_SOFT
+# define RV_IRQ_TIMER IRQ_M_TIMER
+# define RV_IRQ_EXT IRQ_M_EXT
+#else /* CONFIG_RISCV_M_MODE */
+# define CSR_STATUS CSR_SSTATUS
+# define CSR_IE CSR_SIE
+# define CSR_TVEC CSR_STVEC
+# define CSR_SCRATCH CSR_SSCRATCH
+# define CSR_EPC CSR_SEPC
+# define CSR_CAUSE CSR_SCAUSE
+# define CSR_TVAL CSR_STVAL
+# define CSR_IP CSR_SIP
+
+# define CSR_IEH CSR_SIEH
+# define CSR_ISELECT CSR_SISELECT
+# define CSR_IREG CSR_SIREG
+# define CSR_IPH CSR_SIPH
+# define CSR_TOPEI CSR_STOPEI
+# define CSR_TOPI CSR_STOPI
+
+# define SR_IE SR_SIE
+# define SR_PIE SR_SPIE
+# define SR_PP SR_SPP
+
+# define RV_IRQ_SOFT IRQ_S_SOFT
+# define RV_IRQ_TIMER IRQ_S_TIMER
+# define RV_IRQ_EXT IRQ_S_EXT
+# define RV_IRQ_PMU IRQ_PMU_OVF
+# define SIP_LCOFIP (_AC(0x1, UL) << IRQ_PMU_OVF)
+
+#endif /* !CONFIG_RISCV_M_MODE */
+
+/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
+#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT)
+#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER)
+#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT)
+
+#ifndef __ASSEMBLY__
+
+#define csr_swap(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr) \
+ : "=r" (__v) : \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_write(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#define csr_read_set(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_set(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#define csr_read_clear(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_clear(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CSR_H */
diff --git a/riscv/include/asm/current.h b/riscv/include/asm/current.h
new file mode 100644
index 0000000..21774d8
--- /dev/null
+++ b/riscv/include/asm/current.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Based on arm/arm64/include/asm/current.h
+ *
+ * Copyright (C) 2016 ARM
+ * Copyright (C) 2017 SiFive
+ */
+
+
+#ifndef _ASM_RISCV_CURRENT_H
+#define _ASM_RISCV_CURRENT_H
+
+#include <linux/bug.h>
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+register struct task_struct *riscv_current_is_tp __asm__("tp");
+
+/*
+ * This only works because "struct thread_info" is at offset 0 from "struct
+ * task_struct". This constraint seems to be necessary on other architectures
+ * as well, but __switch_to enforces it. We can't check TASK_TI here because
+ * <asm/asm-offsets.h> includes this, and I can't get the definition of "struct
+ * task_struct" here due to some header ordering problems.
+ */
+static __always_inline struct task_struct *get_current(void)
+{
+ return riscv_current_is_tp;
+}
+
+#define current get_current()
+
+register unsigned long current_stack_pointer __asm__("sp");
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CURRENT_H */
diff --git a/riscv/include/asm/delay.h b/riscv/include/asm/delay.h
new file mode 100644
index 0000000..524f8ef
--- /dev/null
+++ b/riscv/include/asm/delay.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2016 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_DELAY_H
+#define _ASM_RISCV_DELAY_H
+
+extern unsigned long riscv_timebase;
+
+#define udelay udelay
+extern void udelay(unsigned long usecs);
+
+#define ndelay ndelay
+extern void ndelay(unsigned long nsecs);
+
+extern void __delay(unsigned long cycles);
+
+#endif /* _ASM_RISCV_DELAY_H */
diff --git a/riscv/include/asm/dma-noncoherent.h b/riscv/include/asm/dma-noncoherent.h
new file mode 100644
index 0000000..312cfa0
--- /dev/null
+++ b/riscv/include/asm/dma-noncoherent.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+
+#ifndef __ASM_DMA_NONCOHERENT_H
+#define __ASM_DMA_NONCOHERENT_H
+
+#include <linux/dma-direct.h>
+
+/*
+ * struct riscv_nonstd_cache_ops - Structure for non-standard CMO function pointers
+ *
+ * @wback: Function pointer for cache writeback
+ * @inv: Function pointer for invalidating cache
+ * @wback_inv: Function pointer for flushing the cache (writeback + invalidating)
+ */
+struct riscv_nonstd_cache_ops {
+ void (*wback)(phys_addr_t paddr, size_t size);
+ void (*inv)(phys_addr_t paddr, size_t size);
+ void (*wback_inv)(phys_addr_t paddr, size_t size);
+};
+
+extern struct riscv_nonstd_cache_ops noncoherent_cache_ops;
+
+void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops);
+
+#endif /* __ASM_DMA_NONCOHERENT_H */
diff --git a/riscv/include/asm/efi.h b/riscv/include/asm/efi.h
new file mode 100644
index 0000000..46a3559
--- /dev/null
+++ b/riscv/include/asm/efi.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef _ASM_EFI_H
+#define _ASM_EFI_H
+
+#include <asm/csr.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+#ifdef CONFIG_EFI
+extern void efi_init(void);
+#else
+#define efi_init()
+#endif
+
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);
+
+#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
+
+/* Load initrd anywhere in system RAM */
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
+{
+ return ULONG_MAX;
+}
+
+static inline unsigned long efi_get_kimg_min_align(void)
+{
+ /*
+ * RISC-V requires the kernel image to placed 2 MB aligned base for 64
+ * bit and 4MB for 32 bit.
+ */
+ return IS_ENABLED(CONFIG_64BIT) ? SZ_2M : SZ_4M;
+}
+
+#define EFI_KIMG_PREFERRED_ADDRESS efi_get_kimg_min_align()
+
+void arch_efi_call_virt_setup(void);
+void arch_efi_call_virt_teardown(void);
+
+unsigned long stext_offset(void);
+
+void efi_icache_sync(unsigned long start, unsigned long end);
+
+#endif /* _ASM_EFI_H */
diff --git a/riscv/include/asm/elf.h b/riscv/include/asm/elf.h
new file mode 100644
index 0000000..06c236b
--- /dev/null
+++ b/riscv/include/asm/elf.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_ELF_H
+#define _ASM_RISCV_ELF_H
+
+#include <uapi/linux/elf.h>
+#include <linux/compat.h>
+#include <uapi/asm/elf.h>
+#include <asm/auxvec.h>
+#include <asm/byteorder.h>
+#include <asm/cacheinfo.h>
+#include <asm/cpufeature.h>
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_ARCH EM_RISCV
+
+#ifndef ELF_CLASS
+#ifdef CONFIG_64BIT
+#define ELF_CLASS ELFCLASS64
+#else
+#define ELF_CLASS ELFCLASS32
+#endif
+#endif
+
+#define ELF_DATA ELFDATA2LSB
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) (((x)->e_machine == EM_RISCV) && \
+ ((x)->e_ident[EI_CLASS] == ELF_CLASS))
+
+extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
+#define compat_elf_check_arch compat_elf_check_arch
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_FDPIC_CORE_EFLAGS 0
+#define ELF_EXEC_PAGESIZE (PAGE_SIZE)
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE ((DEFAULT_MAP_WINDOW / 3) * 2)
+
+#ifdef CONFIG_64BIT
+#ifdef CONFIG_COMPAT
+#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
+ 0x7ff >> (PAGE_SHIFT - 12) : \
+ 0x3ffff >> (PAGE_SHIFT - 12))
+#else
+#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
+#endif
+#endif
+
+/*
+ * Provides information on the availiable set of ISA extensions to userspace,
+ * via a bitmap that coorespends to each single-letter ISA extension. This is
+ * essentially defunct, but will remain for compatibility with userspace.
+ */
+#define ELF_HWCAP riscv_get_elf_hwcap()
+extern unsigned long elf_hwcap;
+
+#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \
+ do { \
+ (_r)->a1 = _exec_map_addr; \
+ (_r)->a2 = _interp_map_addr; \
+ (_r)->a3 = dynamic_addr; \
+ } while (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+
+#define COMPAT_ELF_PLATFORM (NULL)
+
+#define ARCH_DLINFO \
+do { \
+ /* \
+ * Note that we add ulong after elf_addr_t because \
+ * casting current->mm->context.vdso triggers a cast \
+ * warning of cast from pointer to integer for \
+ * COMPAT ELFCLASS32. \
+ */ \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (elf_addr_t)(ulong)current->mm->context.vdso); \
+ NEW_AUX_ENT(AT_L1I_CACHESIZE, \
+ get_cache_size(1, CACHE_TYPE_INST)); \
+ NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, \
+ get_cache_geometry(1, CACHE_TYPE_INST)); \
+ NEW_AUX_ENT(AT_L1D_CACHESIZE, \
+ get_cache_size(1, CACHE_TYPE_DATA)); \
+ NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, \
+ get_cache_geometry(1, CACHE_TYPE_DATA)); \
+ NEW_AUX_ENT(AT_L2_CACHESIZE, \
+ get_cache_size(2, CACHE_TYPE_UNIFIED)); \
+ NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, \
+ get_cache_geometry(2, CACHE_TYPE_UNIFIED)); \
+ NEW_AUX_ENT(AT_L3_CACHESIZE, \
+ get_cache_size(3, CACHE_TYPE_UNIFIED)); \
+ NEW_AUX_ENT(AT_L3_CACHEGEOMETRY, \
+ get_cache_geometry(3, CACHE_TYPE_UNIFIED)); \
+ /* \
+ * Should always be nonzero unless there's a kernel bug. \
+ * If we haven't determined a sensible value to give to \
+ * userspace, omit the entry: \
+ */ \
+ if (likely(signal_minsigstksz)) \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, signal_minsigstksz); \
+ else \
+ NEW_AUX_ENT(AT_IGNORE, 0); \
+} while (0)
+
+#ifdef CONFIG_MMU
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#endif /* CONFIG_MMU */
+
+#define ELF_CORE_COPY_REGS(dest, regs) \
+do { \
+ *(struct user_regs_struct *)&(dest) = \
+ *(struct user_regs_struct *)regs; \
+} while (0);
+
+#ifdef CONFIG_COMPAT
+
+#define SET_PERSONALITY(ex) \
+do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ set_thread_flag(TIF_32BIT); \
+ else \
+ clear_thread_flag(TIF_32BIT); \
+ if (personality(current->personality) != PER_LINUX32) \
+ set_personality(PER_LINUX | \
+ (current->personality & (~PER_MASK))); \
+} while (0)
+
+#define COMPAT_ELF_ET_DYN_BASE ((TASK_SIZE_32 / 3) * 2)
+
+/* rv32 registers */
+typedef compat_ulong_t compat_elf_greg_t;
+typedef compat_elf_greg_t compat_elf_gregset_t[ELF_NGREG];
+
+extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#define compat_arch_setup_additional_pages \
+ compat_arch_setup_additional_pages
+
+#endif /* CONFIG_COMPAT */
+#endif /* _ASM_RISCV_ELF_H */
diff --git a/riscv/include/asm/entry-common.h b/riscv/include/asm/entry-common.h
new file mode 100644
index 0000000..7ab5e34
--- /dev/null
+++ b/riscv/include/asm/entry-common.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_ENTRY_COMMON_H
+#define _ASM_RISCV_ENTRY_COMMON_H
+
+#include <asm/stacktrace.h>
+
+void handle_page_fault(struct pt_regs *regs);
+void handle_break(struct pt_regs *regs);
+
+#ifdef CONFIG_RISCV_MISALIGNED
+int handle_misaligned_load(struct pt_regs *regs);
+int handle_misaligned_store(struct pt_regs *regs);
+#else
+static inline int handle_misaligned_load(struct pt_regs *regs)
+{
+ return -1;
+}
+static inline int handle_misaligned_store(struct pt_regs *regs)
+{
+ return -1;
+}
+#endif
+
+#endif /* _ASM_RISCV_ENTRY_COMMON_H */
diff --git a/riscv/include/asm/errata_list.h b/riscv/include/asm/errata_list.h
new file mode 100644
index 0000000..83ed25e
--- /dev/null
+++ b/riscv/include/asm/errata_list.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Sifive.
+ */
+#ifndef ASM_ERRATA_LIST_H
+#define ASM_ERRATA_LIST_H
+
+#include <asm/alternative.h>
+#include <asm/csr.h>
+#include <asm/insn-def.h>
+#include <asm/hwcap.h>
+#include <asm/vendorid_list.h>
+
+#ifdef CONFIG_ERRATA_ANDES
+#define ERRATA_ANDESTECH_NO_IOCP 0
+#define ERRATA_ANDESTECH_NUMBER 1
+#endif
+
+#ifdef CONFIG_ERRATA_SIFIVE
+#define ERRATA_SIFIVE_CIP_453 0
+#define ERRATA_SIFIVE_CIP_1200 1
+#define ERRATA_SIFIVE_NUMBER 2
+#endif
+
+#ifdef CONFIG_ERRATA_THEAD
+#define ERRATA_THEAD_PBMT 0
+#define ERRATA_THEAD_CMO 1
+#define ERRATA_THEAD_PMU 2
+#define ERRATA_THEAD_NUMBER 3
+#endif
+
+#ifdef __ASSEMBLY__
+
+#define ALT_INSN_FAULT(x) \
+ALTERNATIVE(__stringify(RISCV_PTR do_trap_insn_fault), \
+ __stringify(RISCV_PTR sifive_cip_453_insn_fault_trp), \
+ SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \
+ CONFIG_ERRATA_SIFIVE_CIP_453)
+
+#define ALT_PAGE_FAULT(x) \
+ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \
+ __stringify(RISCV_PTR sifive_cip_453_page_fault_trp), \
+ SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \
+ CONFIG_ERRATA_SIFIVE_CIP_453)
+#else /* !__ASSEMBLY__ */
+
+#define ALT_FLUSH_TLB_PAGE(x) \
+asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
+ ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
+ : : "r" (addr) : "memory")
+
+/*
+ * _val is marked as "will be overwritten", so need to set it to 0
+ * in the default case.
+ */
+#define ALT_SVPBMT_SHIFT 61
+#define ALT_THEAD_PBMT_SHIFT 59
+#define ALT_SVPBMT(_val, prot) \
+asm(ALTERNATIVE_2("li %0, 0\t\nnop", \
+ "li %0, %1\t\nslli %0,%0,%3", 0, \
+ RISCV_ISA_EXT_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \
+ "li %0, %2\t\nslli %0,%0,%4", THEAD_VENDOR_ID, \
+ ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ : "=r"(_val) \
+ : "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT), \
+ "I"(prot##_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(ALT_SVPBMT_SHIFT), \
+ "I"(ALT_THEAD_PBMT_SHIFT))
+
+#ifdef CONFIG_ERRATA_THEAD_PBMT
+/*
+ * IO/NOCACHE memory types are handled together with svpbmt,
+ * so on T-Head chips, check if no other memory type is set,
+ * and set the non-0 PMA type if applicable.
+ */
+#define ALT_THEAD_PMA(_val) \
+asm volatile(ALTERNATIVE( \
+ __nops(7), \
+ "li t3, %1\n\t" \
+ "slli t3, t3, %3\n\t" \
+ "and t3, %0, t3\n\t" \
+ "bne t3, zero, 2f\n\t" \
+ "li t3, %2\n\t" \
+ "slli t3, t3, %3\n\t" \
+ "or %0, %0, t3\n\t" \
+ "2:", THEAD_VENDOR_ID, \
+ ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ : "+r"(_val) \
+ : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(ALT_THEAD_PBMT_SHIFT) \
+ : "t3")
+#else
+#define ALT_THEAD_PMA(_val)
+#endif
+
+/*
+ * th.dcache.ipa rs1 (invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01010 rs1 000 00000 0001011
+ * th.dache.iva rs1 (invalida, virtual address)
+ * 0000001 00110 rs1 000 00000 0001011
+ *
+ * th.dcache.cpa rs1 (clean, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01001 rs1 000 00000 0001011
+ * th.dcache.cva rs1 (clean, virtual address)
+ * 0000001 00101 rs1 000 00000 0001011
+ *
+ * th.dcache.cipa rs1 (clean then invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01011 rs1 000 00000 0001011
+ * th.dcache.civa rs1 (... virtual address)
+ * 0000001 00111 rs1 000 00000 0001011
+ *
+ * th.sync.s (make sure all cache operations finished)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000000 11001 00000 000 00000 0001011
+ */
+#define THEAD_INVAL_A0 ".long 0x0265000b"
+#define THEAD_CLEAN_A0 ".long 0x0255000b"
+#define THEAD_FLUSH_A0 ".long 0x0275000b"
+#define THEAD_SYNC_S ".long 0x0190000b"
+
+#define ALT_CMO_OP(_op, _start, _size, _cachesize) \
+asm volatile(ALTERNATIVE_2( \
+ __nops(6), \
+ "mv a0, %1\n\t" \
+ "j 2f\n\t" \
+ "3:\n\t" \
+ CBO_##_op(a0) \
+ "add a0, a0, %0\n\t" \
+ "2:\n\t" \
+ "bltu a0, %2, 3b\n\t" \
+ "nop", 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM, \
+ "mv a0, %1\n\t" \
+ "j 2f\n\t" \
+ "3:\n\t" \
+ THEAD_##_op##_A0 "\n\t" \
+ "add a0, a0, %0\n\t" \
+ "2:\n\t" \
+ "bltu a0, %2, 3b\n\t" \
+ THEAD_SYNC_S, THEAD_VENDOR_ID, \
+ ERRATA_THEAD_CMO, CONFIG_ERRATA_THEAD_CMO) \
+ : : "r"(_cachesize), \
+ "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \
+ "r"((unsigned long)(_start) + (_size)) \
+ : "a0")
+
+#define THEAD_C9XX_RV_IRQ_PMU 17
+#define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5
+
+#define ALT_SBI_PMU_OVERFLOW(__ovl) \
+asm volatile(ALTERNATIVE( \
+ "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
+ "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
+ THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
+ CONFIG_ERRATA_THEAD_PMU) \
+ : "=r" (__ovl) : \
+ : "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/riscv/include/asm/extable.h b/riscv/include/asm/extable.h
new file mode 100644
index 0000000..3eb5c1f
--- /dev/null
+++ b/riscv/include/asm/extable.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_EXTABLE_H
+#define _ASM_RISCV_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative offsets: the first
+ * is the relative offset to an instruction that is allowed to fault,
+ * and the second is the relative offset at which the program should
+ * continue. No registers are modified, so it is entirely up to the
+ * continuation code to figure out what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+ int insn, fixup;
+ short type, data;
+};
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+#define swap_ex_entry_fixup(a, b, tmp, delta) \
+do { \
+ (a)->fixup = (b)->fixup + (delta); \
+ (b)->fixup = (tmp).fixup - (delta); \
+ (a)->type = (b)->type; \
+ (b)->type = (tmp).type; \
+ (a)->data = (b)->data; \
+ (b)->data = (tmp).data; \
+} while (0)
+
+#ifdef CONFIG_MMU
+bool fixup_exception(struct pt_regs *regs);
+#else
+static inline bool fixup_exception(struct pt_regs *regs) { return false; }
+#endif
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
+bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs);
+#else
+static inline bool
+ex_handler_bpf(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/riscv/include/asm/fence.h b/riscv/include/asm/fence.h
new file mode 100644
index 0000000..2b443a3
--- /dev/null
+++ b/riscv/include/asm/fence.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_RISCV_FENCE_H
+#define _ASM_RISCV_FENCE_H
+
+#ifdef CONFIG_SMP
+#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
+#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
+#else
+#define RISCV_ACQUIRE_BARRIER
+#define RISCV_RELEASE_BARRIER
+#endif
+
+#endif /* _ASM_RISCV_FENCE_H */
diff --git a/riscv/include/asm/fixmap.h b/riscv/include/asm/fixmap.h
new file mode 100644
index 0000000..0a55099
--- /dev/null
+++ b/riscv/include/asm/fixmap.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_FIXMAP_H
+#define _ASM_RISCV_FIXMAP_H
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/pgtable.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_MMU
+/*
+ * Here we define all the compile-time 'special' virtual addresses.
+ * The point is to have a constant address at compile time, but to
+ * set the physical address only in the boot process.
+ *
+ * These 'compile-time allocated' memory buffers are page-sized. Use
+ * set_fixmap(idx,phys) to associate physical memory with fixmap indices.
+ */
+enum fixed_addresses {
+ FIX_HOLE,
+ /*
+ * The fdt fixmap mapping must be PMD aligned and will be mapped
+ * using PMD entries in fixmap_pmd in 64-bit and a PGD entry in 32-bit.
+ */
+ FIX_FDT_END,
+ FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+
+ /* Below fixmaps will be mapped using fixmap_pte */
+ FIX_PTE,
+ FIX_PMD,
+ FIX_PUD,
+ FIX_P4D,
+ FIX_TEXT_POKE1,
+ FIX_TEXT_POKE0,
+ FIX_EARLYCON_MEM_BASE,
+
+ __end_of_permanent_fixed_addresses,
+ /*
+ * Temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+ */
+#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
+#define FIX_BTMAPS_SLOTS 7
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+
+ __end_of_fixed_addresses
+};
+
+#define __early_set_fixmap __set_fixmap
+
+#define __late_set_fixmap __set_fixmap
+#define __late_clear_fixmap(idx) __set_fixmap((idx), 0, FIXMAP_PAGE_CLEAR)
+
+extern void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t prot);
+
+#include <asm-generic/fixmap.h>
+
+#endif /* CONFIG_MMU */
+#endif /* _ASM_RISCV_FIXMAP_H */
diff --git a/riscv/include/asm/ftrace.h b/riscv/include/asm/ftrace.h
new file mode 100644
index 0000000..2b2f5df
--- /dev/null
+++ b/riscv/include/asm/ftrace.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#ifndef _ASM_RISCV_FTRACE_H
+#define _ASM_RISCV_FTRACE_H
+
+/*
+ * The graph frame test is not possible if CONFIG_FRAME_POINTER is not enabled.
+ * Check arch/riscv/kernel/mcount.S for detail.
+ */
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER)
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+#endif
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
+/*
+ * Clang prior to 13 had "mcount" instead of "_mcount":
+ * https://reviews.llvm.org/D98881
+ */
+#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
+#define MCOUNT_NAME _mcount
+#else
+#define MCOUNT_NAME mcount
+#endif
+
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#ifndef __ASSEMBLY__
+void MCOUNT_NAME(void);
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+/*
+ * Let's do like x86/arm64 and ignore the compat syscalls.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+ return is_compat_task();
+}
+
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+static inline bool arch_syscall_match_sym_name(const char *sym,
+ const char *name)
+{
+ /*
+ * Since all syscall functions have __riscv_ prefix, we must skip it.
+ * However, as we described above, we decided to ignore compat
+ * syscalls, so we don't care about __riscv_compat_ prefix here.
+ */
+ return !strcmp(sym + 8, name);
+}
+
+struct dyn_arch_ftrace {
+};
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * A general call in RISC-V is a pair of insts:
+ * 1) auipc: setting high-20 pc-related bits to ra register
+ * 2) jalr: setting low-12 offset to ra, jump to ra, and set ra to
+ * return address (original pc + 4)
+ *
+ *<ftrace enable>:
+ * 0: auipc t0/ra, 0x?
+ * 4: jalr t0/ra, ?(t0/ra)
+ *
+ *<ftrace disable>:
+ * 0: nop
+ * 4: nop
+ *
+ * Dynamic ftrace generates probes to call sites, so we must deal with
+ * both auipc and jalr at the same time.
+ */
+
+#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME)
+#define JALR_SIGN_MASK (0x00000800)
+#define JALR_OFFSET_MASK (0x00000fff)
+#define AUIPC_OFFSET_MASK (0xfffff000)
+#define AUIPC_PAD (0x00001000)
+#define JALR_SHIFT 20
+#define JALR_RA (0x000080e7)
+#define AUIPC_RA (0x00000097)
+#define JALR_T0 (0x000282e7)
+#define AUIPC_T0 (0x00000297)
+#define NOP4 (0x00000013)
+
+#define to_jalr_t0(offset) \
+ (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)
+
+#define to_auipc_t0(offset) \
+ ((offset & JALR_SIGN_MASK) ? \
+ (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) : \
+ ((offset & AUIPC_OFFSET_MASK) | AUIPC_T0))
+
+#define make_call_t0(caller, callee, call) \
+do { \
+ unsigned int offset = \
+ (unsigned long) callee - (unsigned long) caller; \
+ call[0] = to_auipc_t0(offset); \
+ call[1] = to_jalr_t0(offset); \
+} while (0)
+
+#define to_jalr_ra(offset) \
+ (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_RA)
+
+#define to_auipc_ra(offset) \
+ ((offset & JALR_SIGN_MASK) ? \
+ (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_RA) : \
+ ((offset & AUIPC_OFFSET_MASK) | AUIPC_RA))
+
+#define make_call_ra(caller, callee, call) \
+do { \
+ unsigned int offset = \
+ (unsigned long) callee - (unsigned long) caller; \
+ call[0] = to_auipc_ra(offset); \
+ call[1] = to_jalr_ra(offset); \
+} while (0)
+
+/*
+ * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
+ */
+#define MCOUNT_INSN_SIZE 8
+
+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+struct fgraph_ret_regs {
+ unsigned long a1;
+ unsigned long a0;
+ unsigned long s0;
+ unsigned long ra;
+};
+
+static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
+{
+ return ret_regs->a0;
+}
+
+static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
+{
+ return ret_regs->s0;
+}
+#endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
+#endif
+
+#endif /* _ASM_RISCV_FTRACE_H */
diff --git a/riscv/include/asm/futex.h b/riscv/include/asm/futex.h
new file mode 100644
index 0000000..fc8130f
--- /dev/null
+++ b/riscv/include/asm/futex.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 2018 Jim Wilson (jimw@sifive.com)
+ */
+
+#ifndef _ASM_RISCV_FUTEX_H
+#define _ASM_RISCV_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asm-extable.h>
+
+/* We don't even really need the extable code, but for now keep it simple */
+#ifndef CONFIG_MMU
+#define __enable_user_access() do { } while (0)
+#define __disable_user_access() do { } while (0)
+#endif
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ __enable_user_access(); \
+ __asm__ __volatile__ ( \
+ "1: " insn " \n" \
+ "2: \n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %[r]) \
+ : [r] "+r" (ret), [ov] "=&r" (oldval), \
+ [u] "+m" (*uaddr) \
+ : [op] "Jr" (oparg) \
+ : "memory"); \
+ __disable_user_access(); \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret = 0;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("amoswap.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("amoadd.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("amoor.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("amoand.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("amoxor.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val;
+ uintptr_t tmp;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __enable_user_access();
+ __asm__ __volatile__ (
+ "1: lr.w.aqrl %[v],%[u] \n"
+ " bne %[v],%z[ov],3f \n"
+ "2: sc.w.aqrl %[t],%z[nv],%[u] \n"
+ " bnez %[t],1b \n"
+ "3: \n"
+ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \
+ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \
+ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
+ : [ov] "Jr" (oldval), [nv] "Jr" (newval)
+ : "memory");
+ __disable_user_access();
+
+ *uval = val;
+ return ret;
+}
+
+#endif /* _ASM_RISCV_FUTEX_H */
diff --git a/riscv/include/asm/gdb_xml.h b/riscv/include/asm/gdb_xml.h
new file mode 100644
index 0000000..0934211
--- /dev/null
+++ b/riscv/include/asm/gdb_xml.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GDB_XML_H_
+#define __ASM_GDB_XML_H_
+
+const char riscv_gdb_stub_feature[64] =
+ "PacketSize=800;qXfer:features:read+;";
+
+static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:";
+
+#ifdef CONFIG_64BIT
+static const char gdb_xfer_read_cpuxml[39] =
+ "qXfer:features:read:riscv-64bit-cpu.xml";
+
+static const char riscv_gdb_stub_target_desc[256] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
+"<target>"
+"<xi:include href=\"riscv-64bit-cpu.xml\"/>"
+"</target>";
+
+static const char riscv_gdb_stub_cpuxml[2048] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"
+"<feature name=\"org.gnu.gdb.riscv.cpu\">"
+"<reg name=\""DBG_REG_ZERO"\" bitsize=\"64\" type=\"int\" regnum=\"0\"/>"
+"<reg name=\""DBG_REG_RA"\" bitsize=\"64\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_SP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_GP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_TP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_T0"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_FP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_S1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A0"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A7"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S7"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S8"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S9"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S10"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S11"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_EPC"\" bitsize=\"64\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_STATUS"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"64\" type=\"int\"/>"
+"</feature>";
+#else
+static const char gdb_xfer_read_cpuxml[39] =
+ "qXfer:features:read:riscv-32bit-cpu.xml";
+
+static const char riscv_gdb_stub_target_desc[256] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
+"<target>"
+"<xi:include href=\"riscv-32bit-cpu.xml\"/>"
+"</target>";
+
+static const char riscv_gdb_stub_cpuxml[2048] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"
+"<feature name=\"org.gnu.gdb.riscv.cpu\">"
+"<reg name=\""DBG_REG_ZERO"\" bitsize=\"32\" type=\"int\" regnum=\"0\"/>"
+"<reg name=\""DBG_REG_RA"\" bitsize=\"32\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_SP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_GP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_TP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_T0"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_FP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_S1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A0"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A7"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S7"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S8"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S9"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S10"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S11"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_EPC"\" bitsize=\"32\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_STATUS"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"32\" type=\"int\"/>"
+"</feature>";
+#endif
+#endif
diff --git a/riscv/include/asm/gpr-num.h b/riscv/include/asm/gpr-num.h
new file mode 100644
index 0000000..efeb5ed
--- /dev/null
+++ b/riscv/include/asm/gpr-num.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GPR_NUM_H
+#define __ASM_GPR_NUM_H
+
+#ifdef __ASSEMBLY__
+
+ .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+ .equ .L__gpr_num_x\num, \num
+ .endr
+
+ .equ .L__gpr_num_zero, 0
+ .equ .L__gpr_num_ra, 1
+ .equ .L__gpr_num_sp, 2
+ .equ .L__gpr_num_gp, 3
+ .equ .L__gpr_num_tp, 4
+ .equ .L__gpr_num_t0, 5
+ .equ .L__gpr_num_t1, 6
+ .equ .L__gpr_num_t2, 7
+ .equ .L__gpr_num_s0, 8
+ .equ .L__gpr_num_s1, 9
+ .equ .L__gpr_num_a0, 10
+ .equ .L__gpr_num_a1, 11
+ .equ .L__gpr_num_a2, 12
+ .equ .L__gpr_num_a3, 13
+ .equ .L__gpr_num_a4, 14
+ .equ .L__gpr_num_a5, 15
+ .equ .L__gpr_num_a6, 16
+ .equ .L__gpr_num_a7, 17
+ .equ .L__gpr_num_s2, 18
+ .equ .L__gpr_num_s3, 19
+ .equ .L__gpr_num_s4, 20
+ .equ .L__gpr_num_s5, 21
+ .equ .L__gpr_num_s6, 22
+ .equ .L__gpr_num_s7, 23
+ .equ .L__gpr_num_s8, 24
+ .equ .L__gpr_num_s9, 25
+ .equ .L__gpr_num_s10, 26
+ .equ .L__gpr_num_s11, 27
+ .equ .L__gpr_num_t3, 28
+ .equ .L__gpr_num_t4, 29
+ .equ .L__gpr_num_t5, 30
+ .equ .L__gpr_num_t6, 31
+
+#else /* __ASSEMBLY__ */
+
+#define __DEFINE_ASM_GPR_NUMS \
+" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \
+" .equ .L__gpr_num_x\\num, \\num\n" \
+" .endr\n" \
+" .equ .L__gpr_num_zero, 0\n" \
+" .equ .L__gpr_num_ra, 1\n" \
+" .equ .L__gpr_num_sp, 2\n" \
+" .equ .L__gpr_num_gp, 3\n" \
+" .equ .L__gpr_num_tp, 4\n" \
+" .equ .L__gpr_num_t0, 5\n" \
+" .equ .L__gpr_num_t1, 6\n" \
+" .equ .L__gpr_num_t2, 7\n" \
+" .equ .L__gpr_num_s0, 8\n" \
+" .equ .L__gpr_num_s1, 9\n" \
+" .equ .L__gpr_num_a0, 10\n" \
+" .equ .L__gpr_num_a1, 11\n" \
+" .equ .L__gpr_num_a2, 12\n" \
+" .equ .L__gpr_num_a3, 13\n" \
+" .equ .L__gpr_num_a4, 14\n" \
+" .equ .L__gpr_num_a5, 15\n" \
+" .equ .L__gpr_num_a6, 16\n" \
+" .equ .L__gpr_num_a7, 17\n" \
+" .equ .L__gpr_num_s2, 18\n" \
+" .equ .L__gpr_num_s3, 19\n" \
+" .equ .L__gpr_num_s4, 20\n" \
+" .equ .L__gpr_num_s5, 21\n" \
+" .equ .L__gpr_num_s6, 22\n" \
+" .equ .L__gpr_num_s7, 23\n" \
+" .equ .L__gpr_num_s8, 24\n" \
+" .equ .L__gpr_num_s9, 25\n" \
+" .equ .L__gpr_num_s10, 26\n" \
+" .equ .L__gpr_num_s11, 27\n" \
+" .equ .L__gpr_num_t3, 28\n" \
+" .equ .L__gpr_num_t4, 29\n" \
+" .equ .L__gpr_num_t5, 30\n" \
+" .equ .L__gpr_num_t6, 31\n"
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_GPR_NUM_H */
diff --git a/riscv/include/asm/hugetlb.h b/riscv/include/asm/hugetlb.h
new file mode 100644
index 0000000..20f9c3b
--- /dev/null
+++ b/riscv/include/asm/hugetlb.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_HUGETLB_H
+#define _ASM_RISCV_HUGETLB_H
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+
+bool arch_hugetlb_migration_supported(struct hstate *h);
+#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
+void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz);
+
+#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+void set_huge_pte_at(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, pte_t pte,
+ unsigned long sz);
+
+#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty);
+
+#define __HAVE_ARCH_HUGE_PTEP_GET
+pte_t huge_ptep_get(pte_t *ptep);
+
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
+#define arch_make_huge_pte arch_make_huge_pte
+
+#endif /*CONFIG_RISCV_ISA_SVNAPOT*/
+
+#include <asm-generic/hugetlb.h>
+
+#endif /* _ASM_RISCV_HUGETLB_H */
diff --git a/riscv/include/asm/hwcap.h b/riscv/include/asm/hwcap.h
new file mode 100644
index 0000000..06d3052
--- /dev/null
+++ b/riscv/include/asm/hwcap.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copied from arch/arm64/include/asm/hwcap.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2017 SiFive
+ */
+#ifndef _ASM_RISCV_HWCAP_H
+#define _ASM_RISCV_HWCAP_H
+
+#include <uapi/asm/hwcap.h>
+
+#define RISCV_ISA_EXT_a ('a' - 'a')
+#define RISCV_ISA_EXT_b ('b' - 'a')
+#define RISCV_ISA_EXT_c ('c' - 'a')
+#define RISCV_ISA_EXT_d ('d' - 'a')
+#define RISCV_ISA_EXT_f ('f' - 'a')
+#define RISCV_ISA_EXT_h ('h' - 'a')
+#define RISCV_ISA_EXT_i ('i' - 'a')
+#define RISCV_ISA_EXT_j ('j' - 'a')
+#define RISCV_ISA_EXT_k ('k' - 'a')
+#define RISCV_ISA_EXT_m ('m' - 'a')
+#define RISCV_ISA_EXT_p ('p' - 'a')
+#define RISCV_ISA_EXT_q ('q' - 'a')
+#define RISCV_ISA_EXT_s ('s' - 'a')
+#define RISCV_ISA_EXT_u ('u' - 'a')
+#define RISCV_ISA_EXT_v ('v' - 'a')
+
+/*
+ * These macros represent the logical IDs of each multi-letter RISC-V ISA
+ * extension and are used in the ISA bitmap. The logical IDs start from
+ * RISCV_ISA_EXT_BASE, which allows the 0-25 range to be reserved for single
+ * letter extensions. The maximum, RISCV_ISA_EXT_MAX, is defined in order
+ * to allocate the bitmap and may be increased when necessary.
+ *
+ * New extensions should just be added to the bottom, rather than added
+ * alphabetically, in order to avoid unnecessary shuffling.
+ */
+#define RISCV_ISA_EXT_BASE 26
+
+#define RISCV_ISA_EXT_SSCOFPMF 26
+#define RISCV_ISA_EXT_SSTC 27
+#define RISCV_ISA_EXT_SVINVAL 28
+#define RISCV_ISA_EXT_SVPBMT 29
+#define RISCV_ISA_EXT_ZBB 30
+#define RISCV_ISA_EXT_ZICBOM 31
+#define RISCV_ISA_EXT_ZIHINTPAUSE 32
+#define RISCV_ISA_EXT_SVNAPOT 33
+#define RISCV_ISA_EXT_ZICBOZ 34
+#define RISCV_ISA_EXT_SMAIA 35
+#define RISCV_ISA_EXT_SSAIA 36
+#define RISCV_ISA_EXT_ZBA 37
+#define RISCV_ISA_EXT_ZBS 38
+#define RISCV_ISA_EXT_ZICNTR 39
+#define RISCV_ISA_EXT_ZICSR 40
+#define RISCV_ISA_EXT_ZIFENCEI 41
+#define RISCV_ISA_EXT_ZIHPM 42
+#define RISCV_ISA_EXT_SMSTATEEN 43
+#define RISCV_ISA_EXT_ZICOND 44
+
+#define RISCV_ISA_EXT_MAX 64
+
+#ifdef CONFIG_RISCV_M_MODE
+#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA
+#else
+#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA
+#endif
+
+#endif /* _ASM_RISCV_HWCAP_H */
diff --git a/riscv/include/asm/hwprobe.h b/riscv/include/asm/hwprobe.h
new file mode 100644
index 0000000..5c48f48
--- /dev/null
+++ b/riscv/include/asm/hwprobe.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright 2023 Rivos, Inc
+ */
+
+#ifndef _ASM_HWPROBE_H
+#define _ASM_HWPROBE_H
+
+#include <uapi/asm/hwprobe.h>
+
+#define RISCV_HWPROBE_MAX_KEY 6
+
+static inline bool riscv_hwprobe_key_is_valid(__s64 key)
+{
+ return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
+}
+
+#endif
diff --git a/riscv/include/asm/image.h b/riscv/include/asm/image.h
new file mode 100644
index 0000000..e0b319a
--- /dev/null
+++ b/riscv/include/asm/image.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_IMAGE_H
+#define _ASM_RISCV_IMAGE_H
+
+#define RISCV_IMAGE_MAGIC "RISCV\0\0\0"
+#define RISCV_IMAGE_MAGIC2 "RSC\x05"
+
+#define RISCV_IMAGE_FLAG_BE_SHIFT 0
+#define RISCV_IMAGE_FLAG_BE_MASK 0x1
+
+#define RISCV_IMAGE_FLAG_LE 0
+#define RISCV_IMAGE_FLAG_BE 1
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#error conversion of header fields to LE not yet implemented
+#else
+#define __HEAD_FLAG_BE RISCV_IMAGE_FLAG_LE
+#endif
+
+#define __HEAD_FLAG(field) (__HEAD_FLAG_##field << \
+ RISCV_IMAGE_FLAG_##field##_SHIFT)
+
+#define __HEAD_FLAGS (__HEAD_FLAG(BE))
+
+#define RISCV_HEADER_VERSION_MAJOR 0
+#define RISCV_HEADER_VERSION_MINOR 2
+
+#define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \
+ RISCV_HEADER_VERSION_MINOR)
+
+#ifndef __ASSEMBLY__
+/**
+ * struct riscv_image_header - riscv kernel image header
+ * @code0: Executable code
+ * @code1: Executable code
+ * @text_offset: Image load offset (little endian)
+ * @image_size: Effective Image size (little endian)
+ * @flags: kernel flags (little endian)
+ * @version: version
+ * @res1: reserved
+ * @res2: reserved
+ * @magic: Magic number (RISC-V specific; deprecated)
+ * @magic2: Magic number 2 (to match the ARM64 'magic' field pos)
+ * @res3: reserved (will be used for PE COFF offset)
+ *
+ * The intention is for this header format to be shared between multiple
+ * architectures to avoid a proliferation of image header formats.
+ */
+
+struct riscv_image_header {
+ u32 code0;
+ u32 code1;
+ u64 text_offset;
+ u64 image_size;
+ u64 flags;
+ u32 version;
+ u32 res1;
+ u64 res2;
+ u64 magic;
+ u32 magic2;
+ u32 res3;
+};
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_RISCV_IMAGE_H */
diff --git a/riscv/include/asm/insn-def.h b/riscv/include/asm/insn-def.h
new file mode 100644
index 0000000..e27179b
--- /dev/null
+++ b/riscv/include/asm/insn-def.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_INSN_DEF_H
+#define __ASM_INSN_DEF_H
+
+#include <asm/asm.h>
+
+#define INSN_R_FUNC7_SHIFT 25
+#define INSN_R_RS2_SHIFT 20
+#define INSN_R_RS1_SHIFT 15
+#define INSN_R_FUNC3_SHIFT 12
+#define INSN_R_RD_SHIFT 7
+#define INSN_R_OPCODE_SHIFT 0
+
+#define INSN_I_SIMM12_SHIFT 20
+#define INSN_I_RS1_SHIFT 15
+#define INSN_I_FUNC3_SHIFT 12
+#define INSN_I_RD_SHIFT 7
+#define INSN_I_OPCODE_SHIFT 0
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_AS_HAS_INSN
+
+ .macro insn_r, opcode, func3, func7, rd, rs1, rs2
+ .insn r \opcode, \func3, \func7, \rd, \rs1, \rs2
+ .endm
+
+ .macro insn_i, opcode, func3, rd, rs1, simm12
+ .insn i \opcode, \func3, \rd, \rs1, \simm12
+ .endm
+
+#else
+
+#include <asm/gpr-num.h>
+
+ .macro insn_r, opcode, func3, func7, rd, rs1, rs2
+ .4byte ((\opcode << INSN_R_OPCODE_SHIFT) | \
+ (\func3 << INSN_R_FUNC3_SHIFT) | \
+ (\func7 << INSN_R_FUNC7_SHIFT) | \
+ (.L__gpr_num_\rd << INSN_R_RD_SHIFT) | \
+ (.L__gpr_num_\rs1 << INSN_R_RS1_SHIFT) | \
+ (.L__gpr_num_\rs2 << INSN_R_RS2_SHIFT))
+ .endm
+
+ .macro insn_i, opcode, func3, rd, rs1, simm12
+ .4byte ((\opcode << INSN_I_OPCODE_SHIFT) | \
+ (\func3 << INSN_I_FUNC3_SHIFT) | \
+ (.L__gpr_num_\rd << INSN_I_RD_SHIFT) | \
+ (.L__gpr_num_\rs1 << INSN_I_RS1_SHIFT) | \
+ (\simm12 << INSN_I_SIMM12_SHIFT))
+ .endm
+
+#endif
+
+#define __INSN_R(...) insn_r __VA_ARGS__
+#define __INSN_I(...) insn_i __VA_ARGS__
+
+#else /* ! __ASSEMBLY__ */
+
+#ifdef CONFIG_AS_HAS_INSN
+
+#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \
+ ".insn r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n"
+
+#define __INSN_I(opcode, func3, rd, rs1, simm12) \
+ ".insn i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n"
+
+#else
+
+#include <linux/stringify.h>
+#include <asm/gpr-num.h>
+
+#define DEFINE_INSN_R \
+ __DEFINE_ASM_GPR_NUMS \
+" .macro insn_r, opcode, func3, func7, rd, rs1, rs2\n" \
+" .4byte ((\\opcode << " __stringify(INSN_R_OPCODE_SHIFT) ") |" \
+" (\\func3 << " __stringify(INSN_R_FUNC3_SHIFT) ") |" \
+" (\\func7 << " __stringify(INSN_R_FUNC7_SHIFT) ") |" \
+" (.L__gpr_num_\\rd << " __stringify(INSN_R_RD_SHIFT) ") |" \
+" (.L__gpr_num_\\rs1 << " __stringify(INSN_R_RS1_SHIFT) ") |" \
+" (.L__gpr_num_\\rs2 << " __stringify(INSN_R_RS2_SHIFT) "))\n" \
+" .endm\n"
+
+#define DEFINE_INSN_I \
+ __DEFINE_ASM_GPR_NUMS \
+" .macro insn_i, opcode, func3, rd, rs1, simm12\n" \
+" .4byte ((\\opcode << " __stringify(INSN_I_OPCODE_SHIFT) ") |" \
+" (\\func3 << " __stringify(INSN_I_FUNC3_SHIFT) ") |" \
+" (.L__gpr_num_\\rd << " __stringify(INSN_I_RD_SHIFT) ") |" \
+" (.L__gpr_num_\\rs1 << " __stringify(INSN_I_RS1_SHIFT) ") |" \
+" (\\simm12 << " __stringify(INSN_I_SIMM12_SHIFT) "))\n" \
+" .endm\n"
+
+#define UNDEFINE_INSN_R \
+" .purgem insn_r\n"
+
+#define UNDEFINE_INSN_I \
+" .purgem insn_i\n"
+
+#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \
+ DEFINE_INSN_R \
+ "insn_r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" \
+ UNDEFINE_INSN_R
+
+#define __INSN_I(opcode, func3, rd, rs1, simm12) \
+ DEFINE_INSN_I \
+ "insn_i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n" \
+ UNDEFINE_INSN_I
+
+#endif
+
+#endif /* ! __ASSEMBLY__ */
+
+#define INSN_R(opcode, func3, func7, rd, rs1, rs2) \
+ __INSN_R(RV_##opcode, RV_##func3, RV_##func7, \
+ RV_##rd, RV_##rs1, RV_##rs2)
+
+#define INSN_I(opcode, func3, rd, rs1, simm12) \
+ __INSN_I(RV_##opcode, RV_##func3, RV_##rd, \
+ RV_##rs1, RV_##simm12)
+
+#define RV_OPCODE(v) __ASM_STR(v)
+#define RV_FUNC3(v) __ASM_STR(v)
+#define RV_FUNC7(v) __ASM_STR(v)
+#define RV_SIMM12(v) __ASM_STR(v)
+#define RV_RD(v) __ASM_STR(v)
+#define RV_RS1(v) __ASM_STR(v)
+#define RV_RS2(v) __ASM_STR(v)
+#define __RV_REG(v) __ASM_STR(x ## v)
+#define RV___RD(v) __RV_REG(v)
+#define RV___RS1(v) __RV_REG(v)
+#define RV___RS2(v) __RV_REG(v)
+
+#define RV_OPCODE_MISC_MEM RV_OPCODE(15)
+#define RV_OPCODE_SYSTEM RV_OPCODE(115)
+
+#define HFENCE_VVMA(vaddr, asid) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(17), \
+ __RD(0), RS1(vaddr), RS2(asid))
+
+#define HFENCE_GVMA(gaddr, vmid) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(49), \
+ __RD(0), RS1(gaddr), RS2(vmid))
+
+#define HLVX_HU(dest, addr) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(50), \
+ RD(dest), RS1(addr), __RS2(3))
+
+#define HLV_W(dest, addr) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(52), \
+ RD(dest), RS1(addr), __RS2(0))
+
+#ifdef CONFIG_64BIT
+#define HLV_D(dest, addr) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(54), \
+ RD(dest), RS1(addr), __RS2(0))
+#else
+#define HLV_D(dest, addr) \
+ __ASM_STR(.error "hlv.d requires 64-bit support")
+#endif
+
+#define SINVAL_VMA(vaddr, asid) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(11), \
+ __RD(0), RS1(vaddr), RS2(asid))
+
+#define SFENCE_W_INVAL() \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12), \
+ __RD(0), __RS1(0), __RS2(0))
+
+#define SFENCE_INVAL_IR() \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12), \
+ __RD(0), __RS1(0), __RS2(1))
+
+#define HINVAL_VVMA(vaddr, asid) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(19), \
+ __RD(0), RS1(vaddr), RS2(asid))
+
+#define HINVAL_GVMA(gaddr, vmid) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51), \
+ __RD(0), RS1(gaddr), RS2(vmid))
+
+#define CBO_INVAL(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(0))
+
+#define CBO_CLEAN(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(1))
+
+#define CBO_FLUSH(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(2))
+
+#define CBO_ZERO(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(4))
+
+#endif /* __ASM_INSN_DEF_H */
diff --git a/riscv/include/asm/insn.h b/riscv/include/asm/insn.h
new file mode 100644
index 0000000..06e439e
--- /dev/null
+++ b/riscv/include/asm/insn.h
@@ -0,0 +1,431 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef _ASM_RISCV_INSN_H
+#define _ASM_RISCV_INSN_H
+
+#include <linux/bits.h>
+
+#define RV_INSN_FUNCT3_MASK GENMASK(14, 12)
+#define RV_INSN_FUNCT3_OPOFF 12
+#define RV_INSN_OPCODE_MASK GENMASK(6, 0)
+#define RV_INSN_OPCODE_OPOFF 0
+#define RV_INSN_FUNCT12_OPOFF 20
+
+#define RV_ENCODE_FUNCT3(f_) (RVG_FUNCT3_##f_ << RV_INSN_FUNCT3_OPOFF)
+#define RV_ENCODE_FUNCT12(f_) (RVG_FUNCT12_##f_ << RV_INSN_FUNCT12_OPOFF)
+
+/* The bit field of immediate value in I-type instruction */
+#define RV_I_IMM_SIGN_OPOFF 31
+#define RV_I_IMM_11_0_OPOFF 20
+#define RV_I_IMM_SIGN_OFF 12
+#define RV_I_IMM_11_0_OFF 0
+#define RV_I_IMM_11_0_MASK GENMASK(11, 0)
+
+/* The bit field of immediate value in J-type instruction */
+#define RV_J_IMM_SIGN_OPOFF 31
+#define RV_J_IMM_10_1_OPOFF 21
+#define RV_J_IMM_11_OPOFF 20
+#define RV_J_IMM_19_12_OPOFF 12
+#define RV_J_IMM_SIGN_OFF 20
+#define RV_J_IMM_10_1_OFF 1
+#define RV_J_IMM_11_OFF 11
+#define RV_J_IMM_19_12_OFF 12
+#define RV_J_IMM_10_1_MASK GENMASK(9, 0)
+#define RV_J_IMM_11_MASK GENMASK(0, 0)
+#define RV_J_IMM_19_12_MASK GENMASK(7, 0)
+
+/*
+ * U-type IMMs contain the upper 20bits [31:20] of an immediate with
+ * the rest filled in by zeros, so no shifting required. Similarly,
+ * bit31 contains the signed state, so no sign extension necessary.
+ */
+#define RV_U_IMM_SIGN_OPOFF 31
+#define RV_U_IMM_31_12_OPOFF 0
+#define RV_U_IMM_31_12_MASK GENMASK(31, 12)
+
+/* The bit field of immediate value in B-type instruction */
+#define RV_B_IMM_SIGN_OPOFF 31
+#define RV_B_IMM_10_5_OPOFF 25
+#define RV_B_IMM_4_1_OPOFF 8
+#define RV_B_IMM_11_OPOFF 7
+#define RV_B_IMM_SIGN_OFF 12
+#define RV_B_IMM_10_5_OFF 5
+#define RV_B_IMM_4_1_OFF 1
+#define RV_B_IMM_11_OFF 11
+#define RV_B_IMM_10_5_MASK GENMASK(5, 0)
+#define RV_B_IMM_4_1_MASK GENMASK(3, 0)
+#define RV_B_IMM_11_MASK GENMASK(0, 0)
+
+/* The register offset in RVG instruction */
+#define RVG_RS1_OPOFF 15
+#define RVG_RS2_OPOFF 20
+#define RVG_RD_OPOFF 7
+#define RVG_RS1_MASK GENMASK(4, 0)
+#define RVG_RD_MASK GENMASK(4, 0)
+
+/* The bit field of immediate value in RVC J instruction */
+#define RVC_J_IMM_SIGN_OPOFF 12
+#define RVC_J_IMM_4_OPOFF 11
+#define RVC_J_IMM_9_8_OPOFF 9
+#define RVC_J_IMM_10_OPOFF 8
+#define RVC_J_IMM_6_OPOFF 7
+#define RVC_J_IMM_7_OPOFF 6
+#define RVC_J_IMM_3_1_OPOFF 3
+#define RVC_J_IMM_5_OPOFF 2
+#define RVC_J_IMM_SIGN_OFF 11
+#define RVC_J_IMM_4_OFF 4
+#define RVC_J_IMM_9_8_OFF 8
+#define RVC_J_IMM_10_OFF 10
+#define RVC_J_IMM_6_OFF 6
+#define RVC_J_IMM_7_OFF 7
+#define RVC_J_IMM_3_1_OFF 1
+#define RVC_J_IMM_5_OFF 5
+#define RVC_J_IMM_4_MASK GENMASK(0, 0)
+#define RVC_J_IMM_9_8_MASK GENMASK(1, 0)
+#define RVC_J_IMM_10_MASK GENMASK(0, 0)
+#define RVC_J_IMM_6_MASK GENMASK(0, 0)
+#define RVC_J_IMM_7_MASK GENMASK(0, 0)
+#define RVC_J_IMM_3_1_MASK GENMASK(2, 0)
+#define RVC_J_IMM_5_MASK GENMASK(0, 0)
+
+/* The bit field of immediate value in RVC B instruction */
+#define RVC_B_IMM_SIGN_OPOFF 12
+#define RVC_B_IMM_4_3_OPOFF 10
+#define RVC_B_IMM_7_6_OPOFF 5
+#define RVC_B_IMM_2_1_OPOFF 3
+#define RVC_B_IMM_5_OPOFF 2
+#define RVC_B_IMM_SIGN_OFF 8
+#define RVC_B_IMM_4_3_OFF 3
+#define RVC_B_IMM_7_6_OFF 6
+#define RVC_B_IMM_2_1_OFF 1
+#define RVC_B_IMM_5_OFF 5
+#define RVC_B_IMM_4_3_MASK GENMASK(1, 0)
+#define RVC_B_IMM_7_6_MASK GENMASK(1, 0)
+#define RVC_B_IMM_2_1_MASK GENMASK(1, 0)
+#define RVC_B_IMM_5_MASK GENMASK(0, 0)
+
+#define RVC_INSN_FUNCT4_MASK GENMASK(15, 12)
+#define RVC_INSN_FUNCT4_OPOFF 12
+#define RVC_INSN_FUNCT3_MASK GENMASK(15, 13)
+#define RVC_INSN_FUNCT3_OPOFF 13
+#define RVC_INSN_J_RS1_MASK GENMASK(11, 7)
+#define RVC_INSN_J_RS2_MASK GENMASK(6, 2)
+#define RVC_INSN_OPCODE_MASK GENMASK(1, 0)
+#define RVC_ENCODE_FUNCT3(f_) (RVC_FUNCT3_##f_ << RVC_INSN_FUNCT3_OPOFF)
+#define RVC_ENCODE_FUNCT4(f_) (RVC_FUNCT4_##f_ << RVC_INSN_FUNCT4_OPOFF)
+
+/* The register offset in RVC op=C0 instruction */
+#define RVC_C0_RS1_OPOFF 7
+#define RVC_C0_RS2_OPOFF 2
+#define RVC_C0_RD_OPOFF 2
+
+/* The register offset in RVC op=C1 instruction */
+#define RVC_C1_RS1_OPOFF 7
+#define RVC_C1_RS2_OPOFF 2
+#define RVC_C1_RD_OPOFF 7
+
+/* The register offset in RVC op=C2 instruction */
+#define RVC_C2_RS1_OPOFF 7
+#define RVC_C2_RS2_OPOFF 2
+#define RVC_C2_RD_OPOFF 7
+#define RVC_C2_RS1_MASK GENMASK(4, 0)
+
+/* parts of opcode for RVG*/
+#define RVG_OPCODE_FENCE 0x0f
+#define RVG_OPCODE_AUIPC 0x17
+#define RVG_OPCODE_BRANCH 0x63
+#define RVG_OPCODE_JALR 0x67
+#define RVG_OPCODE_JAL 0x6f
+#define RVG_OPCODE_SYSTEM 0x73
+#define RVG_SYSTEM_CSR_OFF 20
+#define RVG_SYSTEM_CSR_MASK GENMASK(12, 0)
+
+/* parts of opcode for RVF, RVD and RVQ */
+#define RVFDQ_FL_FS_WIDTH_OFF 12
+#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(3, 0)
+#define RVFDQ_FL_FS_WIDTH_W 2
+#define RVFDQ_FL_FS_WIDTH_D 3
+#define RVFDQ_LS_FS_WIDTH_Q 4
+#define RVFDQ_OPCODE_FL 0x07
+#define RVFDQ_OPCODE_FS 0x27
+
+/* parts of opcode for RVV */
+#define RVV_OPCODE_VECTOR 0x57
+#define RVV_VL_VS_WIDTH_8 0
+#define RVV_VL_VS_WIDTH_16 5
+#define RVV_VL_VS_WIDTH_32 6
+#define RVV_VL_VS_WIDTH_64 7
+#define RVV_OPCODE_VL RVFDQ_OPCODE_FL
+#define RVV_OPCODE_VS RVFDQ_OPCODE_FS
+
+/* parts of opcode for RVC*/
+#define RVC_OPCODE_C0 0x0
+#define RVC_OPCODE_C1 0x1
+#define RVC_OPCODE_C2 0x2
+
+/* parts of funct3 code for I, M, A extension*/
+#define RVG_FUNCT3_JALR 0x0
+#define RVG_FUNCT3_BEQ 0x0
+#define RVG_FUNCT3_BNE 0x1
+#define RVG_FUNCT3_BLT 0x4
+#define RVG_FUNCT3_BGE 0x5
+#define RVG_FUNCT3_BLTU 0x6
+#define RVG_FUNCT3_BGEU 0x7
+
+/* parts of funct3 code for C extension*/
+#define RVC_FUNCT3_C_BEQZ 0x6
+#define RVC_FUNCT3_C_BNEZ 0x7
+#define RVC_FUNCT3_C_J 0x5
+#define RVC_FUNCT3_C_JAL 0x1
+#define RVC_FUNCT4_C_JR 0x8
+#define RVC_FUNCT4_C_JALR 0x9
+#define RVC_FUNCT4_C_EBREAK 0x9
+
+#define RVG_FUNCT12_EBREAK 0x1
+#define RVG_FUNCT12_SRET 0x102
+
+#define RVG_MATCH_AUIPC (RVG_OPCODE_AUIPC)
+#define RVG_MATCH_JALR (RV_ENCODE_FUNCT3(JALR) | RVG_OPCODE_JALR)
+#define RVG_MATCH_JAL (RVG_OPCODE_JAL)
+#define RVG_MATCH_FENCE (RVG_OPCODE_FENCE)
+#define RVG_MATCH_BEQ (RV_ENCODE_FUNCT3(BEQ) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BNE (RV_ENCODE_FUNCT3(BNE) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BLT (RV_ENCODE_FUNCT3(BLT) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BGE (RV_ENCODE_FUNCT3(BGE) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BLTU (RV_ENCODE_FUNCT3(BLTU) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BGEU (RV_ENCODE_FUNCT3(BGEU) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_EBREAK (RV_ENCODE_FUNCT12(EBREAK) | RVG_OPCODE_SYSTEM)
+#define RVG_MATCH_SRET (RV_ENCODE_FUNCT12(SRET) | RVG_OPCODE_SYSTEM)
+#define RVC_MATCH_C_BEQZ (RVC_ENCODE_FUNCT3(C_BEQZ) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_BNEZ (RVC_ENCODE_FUNCT3(C_BNEZ) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_J (RVC_ENCODE_FUNCT3(C_J) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_JAL (RVC_ENCODE_FUNCT3(C_JAL) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_JR (RVC_ENCODE_FUNCT4(C_JR) | RVC_OPCODE_C2)
+#define RVC_MATCH_C_JALR (RVC_ENCODE_FUNCT4(C_JALR) | RVC_OPCODE_C2)
+#define RVC_MATCH_C_EBREAK (RVC_ENCODE_FUNCT4(C_EBREAK) | RVC_OPCODE_C2)
+
+#define RVG_MASK_AUIPC (RV_INSN_OPCODE_MASK)
+#define RVG_MASK_JALR (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_JAL (RV_INSN_OPCODE_MASK)
+#define RVG_MASK_FENCE (RV_INSN_OPCODE_MASK)
+#define RVC_MASK_C_JALR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_JR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_JAL (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_J (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVG_MASK_BEQ (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BNE (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BLT (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BGE (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BLTU (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BGEU (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVC_MASK_C_BEQZ (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_BNEZ (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_EBREAK 0xffff
+#define RVG_MASK_EBREAK 0xffffffff
+#define RVG_MASK_SRET 0xffffffff
+
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_GE_32 _UL(0x3)
+#define __INSN_OPCODE_MASK _UL(0x7F)
+#define __INSN_BRANCH_OPCODE _UL(RVG_OPCODE_BRANCH)
+
+#define __RISCV_INSN_FUNCS(name, mask, val) \
+static __always_inline bool riscv_insn_is_##name(u32 code) \
+{ \
+ BUILD_BUG_ON(~(mask) & (val)); \
+ return (code & (mask)) == (val); \
+} \
+
+#if __riscv_xlen == 32
+/* C.JAL is an RV32C-only instruction */
+__RISCV_INSN_FUNCS(c_jal, RVC_MASK_C_JAL, RVC_MATCH_C_JAL)
+#else
+#define riscv_insn_is_c_jal(opcode) 0
+#endif
+__RISCV_INSN_FUNCS(auipc, RVG_MASK_AUIPC, RVG_MATCH_AUIPC)
+__RISCV_INSN_FUNCS(jalr, RVG_MASK_JALR, RVG_MATCH_JALR)
+__RISCV_INSN_FUNCS(jal, RVG_MASK_JAL, RVG_MATCH_JAL)
+__RISCV_INSN_FUNCS(c_j, RVC_MASK_C_J, RVC_MATCH_C_J)
+__RISCV_INSN_FUNCS(beq, RVG_MASK_BEQ, RVG_MATCH_BEQ)
+__RISCV_INSN_FUNCS(bne, RVG_MASK_BNE, RVG_MATCH_BNE)
+__RISCV_INSN_FUNCS(blt, RVG_MASK_BLT, RVG_MATCH_BLT)
+__RISCV_INSN_FUNCS(bge, RVG_MASK_BGE, RVG_MATCH_BGE)
+__RISCV_INSN_FUNCS(bltu, RVG_MASK_BLTU, RVG_MATCH_BLTU)
+__RISCV_INSN_FUNCS(bgeu, RVG_MASK_BGEU, RVG_MATCH_BGEU)
+__RISCV_INSN_FUNCS(c_beqz, RVC_MASK_C_BEQZ, RVC_MATCH_C_BEQZ)
+__RISCV_INSN_FUNCS(c_bnez, RVC_MASK_C_BNEZ, RVC_MATCH_C_BNEZ)
+__RISCV_INSN_FUNCS(c_ebreak, RVC_MASK_C_EBREAK, RVC_MATCH_C_EBREAK)
+__RISCV_INSN_FUNCS(ebreak, RVG_MASK_EBREAK, RVG_MATCH_EBREAK)
+__RISCV_INSN_FUNCS(sret, RVG_MASK_SRET, RVG_MATCH_SRET)
+__RISCV_INSN_FUNCS(fence, RVG_MASK_FENCE, RVG_MATCH_FENCE);
+
+/* special case to catch _any_ system instruction */
+static __always_inline bool riscv_insn_is_system(u32 code)
+{
+ return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_SYSTEM;
+}
+
+/* special case to catch _any_ branch instruction */
+static __always_inline bool riscv_insn_is_branch(u32 code)
+{
+ return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_BRANCH;
+}
+
+static __always_inline bool riscv_insn_is_c_jr(u32 code)
+{
+ return (code & RVC_MASK_C_JR) == RVC_MATCH_C_JR &&
+ (code & RVC_INSN_J_RS1_MASK) != 0;
+}
+
+static __always_inline bool riscv_insn_is_c_jalr(u32 code)
+{
+ return (code & RVC_MASK_C_JALR) == RVC_MATCH_C_JALR &&
+ (code & RVC_INSN_J_RS1_MASK) != 0;
+}
+
+#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
+#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1))
+#define RV_X(X, s, mask) (((X) >> (s)) & (mask))
+#define RVC_X(X, s, mask) RV_X(X, s, mask)
+
+#define RV_EXTRACT_RS1_REG(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RVG_RS1_OPOFF, RVG_RS1_MASK)); })
+
+#define RV_EXTRACT_RD_REG(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RVG_RD_OPOFF, RVG_RD_MASK)); })
+
+#define RV_EXTRACT_UTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_U_IMM_31_12_OPOFF, RV_U_IMM_31_12_MASK)); })
+
+#define RV_EXTRACT_JTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_J_IMM_10_1_OPOFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OFF) | \
+ (RV_X(x_, RV_J_IMM_11_OPOFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OFF) | \
+ (RV_X(x_, RV_J_IMM_19_12_OPOFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OFF) | \
+ (RV_IMM_SIGN(x_) << RV_J_IMM_SIGN_OFF); })
+
+#define RV_EXTRACT_ITYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_I_IMM_11_0_OPOFF, RV_I_IMM_11_0_MASK)) | \
+ (RV_IMM_SIGN(x_) << RV_I_IMM_SIGN_OFF); })
+
+#define RV_EXTRACT_BTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_B_IMM_4_1_OPOFF, RV_B_IMM_4_1_MASK) << RV_B_IMM_4_1_OFF) | \
+ (RV_X(x_, RV_B_IMM_10_5_OPOFF, RV_B_IMM_10_5_MASK) << RV_B_IMM_10_5_OFF) | \
+ (RV_X(x_, RV_B_IMM_11_OPOFF, RV_B_IMM_11_MASK) << RV_B_IMM_11_OFF) | \
+ (RV_IMM_SIGN(x_) << RV_B_IMM_SIGN_OFF); })
+
+#define RVC_EXTRACT_C2_RS1_REG(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RVC_C2_RS1_OPOFF, RVC_C2_RS1_MASK)); })
+
+#define RVC_EXTRACT_JTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_7_OPOFF, RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); })
+
+#define RVC_EXTRACT_BTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
+
+#define RVG_EXTRACT_SYSTEM_CSR(x) \
+ ({typeof(x) x_ = (x); RV_X(x_, RVG_SYSTEM_CSR_OFF, RVG_SYSTEM_CSR_MASK); })
+
+#define RVFDQ_EXTRACT_FL_FS_WIDTH(x) \
+ ({typeof(x) x_ = (x); RV_X(x_, RVFDQ_FL_FS_WIDTH_OFF, \
+ RVFDQ_FL_FS_WIDTH_MASK); })
+
+#define RVV_EXRACT_VL_VS_WIDTH(x) RVFDQ_EXTRACT_FL_FS_WIDTH(x)
+
+/*
+ * Get the immediate from a J-type instruction.
+ *
+ * @insn: instruction to process
+ * Return: immediate
+ */
+static inline s32 riscv_insn_extract_jtype_imm(u32 insn)
+{
+ return RV_EXTRACT_JTYPE_IMM(insn);
+}
+
+/*
+ * Update a J-type instruction with an immediate value.
+ *
+ * @insn: pointer to the jtype instruction
+ * @imm: the immediate to insert into the instruction
+ */
+static inline void riscv_insn_insert_jtype_imm(u32 *insn, s32 imm)
+{
+ /* drop the old IMMs, all jal IMM bits sit at 31:12 */
+ *insn &= ~GENMASK(31, 12);
+ *insn |= (RV_X(imm, RV_J_IMM_10_1_OFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OPOFF) |
+ (RV_X(imm, RV_J_IMM_11_OFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OPOFF) |
+ (RV_X(imm, RV_J_IMM_19_12_OFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OPOFF) |
+ (RV_X(imm, RV_J_IMM_SIGN_OFF, 1) << RV_J_IMM_SIGN_OPOFF);
+}
+
+/*
+ * Put together one immediate from a U-type and I-type instruction pair.
+ *
+ * The U-type contains an upper immediate, meaning bits[31:12] with [11:0]
+ * being zero, while the I-type contains a 12bit immediate.
+ * Combined these can encode larger 32bit values and are used for example
+ * in auipc + jalr pairs to allow larger jumps.
+ *
+ * @utype_insn: instruction containing the upper immediate
+ * @itype_insn: instruction
+ * Return: combined immediate
+ */
+static inline s32 riscv_insn_extract_utype_itype_imm(u32 utype_insn, u32 itype_insn)
+{
+ s32 imm;
+
+ imm = RV_EXTRACT_UTYPE_IMM(utype_insn);
+ imm += RV_EXTRACT_ITYPE_IMM(itype_insn);
+
+ return imm;
+}
+
+/*
+ * Update a set of two instructions (U-type + I-type) with an immediate value.
+ *
+ * Used for example in auipc+jalrs pairs the U-type instructions contains
+ * a 20bit upper immediate representing bits[31:12], while the I-type
+ * instruction contains a 12bit immediate representing bits[11:0].
+ *
+ * This also takes into account that both separate immediates are
+ * considered as signed values, so if the I-type immediate becomes
+ * negative (BIT(11) set) the U-type part gets adjusted.
+ *
+ * @utype_insn: pointer to the utype instruction of the pair
+ * @itype_insn: pointer to the itype instruction of the pair
+ * @imm: the immediate to insert into the two instructions
+ */
+static inline void riscv_insn_insert_utype_itype_imm(u32 *utype_insn, u32 *itype_insn, s32 imm)
+{
+ /* drop possible old IMM values */
+ *utype_insn &= ~(RV_U_IMM_31_12_MASK);
+ *itype_insn &= ~(RV_I_IMM_11_0_MASK << RV_I_IMM_11_0_OPOFF);
+
+ /* add the adapted IMMs */
+ *utype_insn |= (imm & RV_U_IMM_31_12_MASK) + ((imm & BIT(11)) << 1);
+ *itype_insn |= ((imm & RV_I_IMM_11_0_MASK) << RV_I_IMM_11_0_OPOFF);
+}
+#endif /* _ASM_RISCV_INSN_H */
diff --git a/riscv/include/asm/io.h b/riscv/include/asm/io.h
new file mode 100644
index 0000000..42497d4
--- /dev/null
+++ b/riscv/include/asm/io.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
+ * which was based on arch/arm/include/io.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_IO_H
+#define _ASM_RISCV_IO_H
+
+#include <linux/types.h>
+#include <linux/pgtable.h>
+#include <asm/mmiowb.h>
+#include <asm/early_ioremap.h>
+
+/*
+ * MMIO access functions are separated out to break dependency cycles
+ * when using {read,write}* fns in low-level headers
+ */
+#include <asm/mmio.h>
+
+/*
+ * I/O port access constants.
+ */
+#ifdef CONFIG_MMU
+#define IO_SPACE_LIMIT (PCI_IO_SIZE - 1)
+#define PCI_IOBASE ((void __iomem *)PCI_IO_START)
+#endif /* CONFIG_MMU */
+
+/*
+ * Emulation routines for the port-mapped IO space used by some PCI drivers.
+ * These are defined as being "fully synchronous", but also "not guaranteed to
+ * be fully ordered with respect to other memory and I/O operations". We're
+ * going to be on the safe side here and just make them:
+ * - Fully ordered WRT each other, by bracketing them with two fences. The
+ * outer set contains both I/O so inX is ordered with outX, while the inner just
+ * needs the type of the access (I for inX and O for outX).
+ * - Ordered in the same manner as readX/writeX WRT memory by subsuming their
+ * fences.
+ * - Ordered WRT timer reads, so udelay and friends don't get elided by the
+ * implementation.
+ * Note that there is no way to actually enforce that outX is a non-posted
+ * operation on RISC-V, but hopefully the timer ordering constraint is
+ * sufficient to ensure this works sanely on controllers that support I/O
+ * writes.
+ */
+#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
+#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory");
+#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
+#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
+
+/*
+ * Accesses from a single hart to a single I/O address must be ordered. This
+ * allows us to use the raw read macros, but we still need to fence before and
+ * after the block to ensure ordering WRT other macros. These are defined to
+ * perform host-endian accesses so we use __raw instead of __cpu.
+ */
+#define __io_reads_ins(port, ctype, len, bfence, afence) \
+ static inline void __ ## port ## len(const volatile void __iomem *addr, \
+ void *buffer, \
+ unsigned int count) \
+ { \
+ bfence; \
+ if (count) { \
+ ctype *buf = buffer; \
+ \
+ do { \
+ ctype x = __raw_read ## len(addr); \
+ *buf++ = x; \
+ } while (--count); \
+ } \
+ afence; \
+ }
+
+#define __io_writes_outs(port, ctype, len, bfence, afence) \
+ static inline void __ ## port ## len(volatile void __iomem *addr, \
+ const void *buffer, \
+ unsigned int count) \
+ { \
+ bfence; \
+ if (count) { \
+ const ctype *buf = buffer; \
+ \
+ do { \
+ __raw_write ## len(*buf++, addr); \
+ } while (--count); \
+ } \
+ afence; \
+ }
+
+__io_reads_ins(reads, u8, b, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u16, w, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
+#define readsb(addr, buffer, count) __readsb(addr, buffer, count)
+#define readsw(addr, buffer, count) __readsw(addr, buffer, count)
+#define readsl(addr, buffer, count) __readsl(addr, buffer, count)
+
+__io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
+#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count)
+#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count)
+#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count)
+
+__io_writes_outs(writes, u8, b, __io_bw(), __io_aw())
+__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
+__io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
+#define writesb(addr, buffer, count) __writesb(addr, buffer, count)
+#define writesw(addr, buffer, count) __writesw(addr, buffer, count)
+#define writesl(addr, buffer, count) __writesl(addr, buffer, count)
+
+__io_writes_outs(outs, u8, b, __io_pbw(), __io_paw())
+__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
+__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
+#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count)
+#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count)
+#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count)
+
+#ifdef CONFIG_64BIT
+__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
+#define readsq(addr, buffer, count) __readsq(addr, buffer, count)
+
+__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
+#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count)
+
+__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
+#define writesq(addr, buffer, count) __writesq(addr, buffer, count)
+
+__io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
+#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count)
+#endif
+
+#include <asm-generic/io.h>
+
+#ifdef CONFIG_MMU
+#define arch_memremap_wb(addr, size) \
+ ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
+#endif
+
+#endif /* _ASM_RISCV_IO_H */
diff --git a/riscv/include/asm/irq.h b/riscv/include/asm/irq.h
new file mode 100644
index 0000000..8e10a94
--- /dev/null
+++ b/riscv/include/asm/irq.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_IRQ_H
+#define _ASM_RISCV_IRQ_H
+
+#include <linux/interrupt.h>
+#include <linux/linkage.h>
+
+#include <asm-generic/irq.h>
+
+void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void));
+
+struct fwnode_handle *riscv_get_intc_hwnode(void);
+
+#endif /* _ASM_RISCV_IRQ_H */
diff --git a/riscv/include/asm/irq_stack.h b/riscv/include/asm/irq_stack.h
new file mode 100644
index 0000000..6441ded
--- /dev/null
+++ b/riscv/include/asm/irq_stack.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_IRQ_STACK_H
+#define _ASM_RISCV_IRQ_STACK_H
+
+#include <linux/bug.h>
+#include <linux/gfp.h>
+#include <linux/kconfig.h>
+#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
+#include <asm/thread_info.h>
+
+DECLARE_PER_CPU(ulong *, irq_stack_ptr);
+
+asmlinkage void call_on_irq_stack(struct pt_regs *regs,
+ void (*func)(struct pt_regs *));
+
+#ifdef CONFIG_VMAP_STACK
+/*
+ * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
+ * stacks need to have the same alignment.
+ */
+static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
+{
+ void *p;
+
+ p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
+ __builtin_return_address(0));
+ return kasan_reset_tag(p);
+}
+#endif /* CONFIG_VMAP_STACK */
+
+#endif /* _ASM_RISCV_IRQ_STACK_H */
diff --git a/riscv/include/asm/irq_work.h b/riscv/include/asm/irq_work.h
new file mode 100644
index 0000000..b27a4d6
--- /dev/null
+++ b/riscv/include/asm/irq_work.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_IRQ_WORK_H
+#define _ASM_RISCV_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return IS_ENABLED(CONFIG_SMP);
+}
+
+#endif /* _ASM_RISCV_IRQ_WORK_H */
diff --git a/riscv/include/asm/irqflags.h b/riscv/include/asm/irqflags.h
new file mode 100644
index 0000000..08d4d6a
--- /dev/null
+++ b/riscv/include/asm/irqflags.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+
+#ifndef _ASM_RISCV_IRQFLAGS_H
+#define _ASM_RISCV_IRQFLAGS_H
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+/* read interrupt enabled status */
+static inline unsigned long arch_local_save_flags(void)
+{
+ return csr_read(CSR_STATUS);
+}
+
+/* unconditionally enable interrupts */
+static inline void arch_local_irq_enable(void)
+{
+ csr_set(CSR_STATUS, SR_IE);
+}
+
+/* unconditionally disable interrupts */
+static inline void arch_local_irq_disable(void)
+{
+ csr_clear(CSR_STATUS, SR_IE);
+}
+
+/* get status and disable interrupts */
+static inline unsigned long arch_local_irq_save(void)
+{
+ return csr_read_clear(CSR_STATUS, SR_IE);
+}
+
+/* test flags */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & SR_IE);
+}
+
+/* test hardware interrupt enable bit */
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+/* set interrupt enabled status */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ csr_set(CSR_STATUS, flags & SR_IE);
+}
+
+#endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/riscv/include/asm/jump_label.h b/riscv/include/asm/jump_label.h
new file mode 100644
index 0000000..14a5ea8
--- /dev/null
+++ b/riscv/include/asm/jump_label.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Emil Renner Berthing
+ *
+ * Based on arch/arm64/include/asm/jump_label.h
+ */
+#ifndef __ASM_JUMP_LABEL_H
+#define __ASM_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/asm.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key * const key,
+ const bool branch)
+{
+ asm_volatile_goto(
+ " .align 2 \n\t"
+ " .option push \n\t"
+ " .option norelax \n\t"
+ " .option norvc \n\t"
+ "1: nop \n\t"
+ " .option pop \n\t"
+ " .pushsection __jump_table, \"aw\" \n\t"
+ " .align " RISCV_LGPTR " \n\t"
+ " .long 1b - ., %l[label] - . \n\t"
+ " " RISCV_PTR " %0 - . \n\t"
+ " .popsection \n\t"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key * const key,
+ const bool branch)
+{
+ asm_volatile_goto(
+ " .align 2 \n\t"
+ " .option push \n\t"
+ " .option norelax \n\t"
+ " .option norvc \n\t"
+ "1: jal zero, %l[label] \n\t"
+ " .option pop \n\t"
+ " .pushsection __jump_table, \"aw\" \n\t"
+ " .align " RISCV_LGPTR " \n\t"
+ " .long 1b - ., %l[label] - . \n\t"
+ " " RISCV_PTR " %0 - . \n\t"
+ " .popsection \n\t"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_JUMP_LABEL_H */
diff --git a/riscv/include/asm/kasan.h b/riscv/include/asm/kasan.h
new file mode 100644
index 0000000..0b85e36
--- /dev/null
+++ b/riscv/include/asm/kasan.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Andes Technology Corporation */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+/*
+ * The following comment was copied from arm64:
+ * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
+ * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
+ * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
+ *
+ * KASAN_SHADOW_OFFSET:
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
+ *
+ * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range
+ * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual
+ * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation:
+ * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
+ * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
+ */
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+#define KASAN_SHADOW_SIZE (UL(1) << ((VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
+/*
+ * Depending on the size of the virtual address space, the region may not be
+ * aligned on PGDIR_SIZE, so force its alignment to ease its population.
+ */
+#define KASAN_SHADOW_START ((KASAN_SHADOW_END - KASAN_SHADOW_SIZE) & PGDIR_MASK)
+#define KASAN_SHADOW_END MODULES_LOWEST_VADDR
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+void kasan_init(void);
+asmlinkage void kasan_early_init(void);
+void kasan_swapper_init(void);
+
+#endif
+#endif
+#endif /* __ASM_KASAN_H */
diff --git a/riscv/include/asm/kdebug.h b/riscv/include/asm/kdebug.h
new file mode 100644
index 0000000..85ac004
--- /dev/null
+++ b/riscv/include/asm/kdebug.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_TRAP,
+ DIE_OOPS
+};
+
+#endif
diff --git a/riscv/include/asm/kexec.h b/riscv/include/asm/kexec.h
new file mode 100644
index 0000000..2b56769
--- /dev/null
+++ b/riscv/include/asm/kexec.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 FORTH-ICS/CARV
+ * Nick Kossifidis <mick@ics.forth.gr>
+ */
+
+#ifndef _RISCV_KEXEC_H
+#define _RISCV_KEXEC_H
+
+#include <asm/page.h> /* For PAGE_SIZE */
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+/* Reserve a page for the control code buffer */
+#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
+
+#define KEXEC_ARCH KEXEC_ARCH_RISCV
+
+extern void riscv_crash_save_regs(struct pt_regs *newregs);
+
+static inline void
+crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(struct pt_regs));
+ else
+ riscv_crash_save_regs(newregs);
+}
+
+
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+ void *fdt; /* For CONFIG_KEXEC_FILE */
+ unsigned long fdt_addr;
+};
+
+extern const unsigned char riscv_kexec_relocate[];
+extern const unsigned int riscv_kexec_relocate_size;
+
+typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,
+ unsigned long jump_addr,
+ unsigned long fdt_addr,
+ unsigned long hartid,
+ unsigned long va_pa_off);
+
+extern riscv_kexec_method riscv_kexec_norelocate;
+
+#ifdef CONFIG_KEXEC_FILE
+extern const struct kexec_file_ops elf_kexec_ops;
+
+struct purgatory_info;
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ Elf_Shdr *section,
+ const Elf_Shdr *relsec,
+ const Elf_Shdr *symtab);
+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+
+struct kimage;
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
+#endif
+
+#endif
diff --git a/riscv/include/asm/kfence.h b/riscv/include/asm/kfence.h
new file mode 100644
index 0000000..0bbffd5
--- /dev/null
+++ b/riscv/include/asm/kfence.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_KFENCE_H
+#define _ASM_RISCV_KFENCE_H
+
+#include <linux/kfence.h>
+#include <linux/pfn.h>
+#include <asm-generic/pgalloc.h>
+#include <asm/pgtable.h>
+
+static inline bool arch_kfence_init_pool(void)
+{
+ return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ pte_t *pte = virt_to_kpte(addr);
+
+ if (protect)
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ else
+ set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ return true;
+}
+
+#endif /* _ASM_RISCV_KFENCE_H */
diff --git a/riscv/include/asm/kgdb.h b/riscv/include/asm/kgdb.h
new file mode 100644
index 0000000..46677da
--- /dev/null
+++ b/riscv/include/asm/kgdb.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_KGDB_H_
+#define __ASM_KGDB_H_
+
+#ifdef __KERNEL__
+
+#define GDB_SIZEOF_REG sizeof(unsigned long)
+
+#define DBG_MAX_REG_NUM (36)
+#define NUMREGBYTES ((DBG_MAX_REG_NUM) * GDB_SIZEOF_REG)
+#define CACHE_FLUSH_IS_SAFE 1
+#define BUFMAX 2048
+#ifdef CONFIG_RISCV_ISA_C
+#define BREAK_INSTR_SIZE 2
+#else
+#define BREAK_INSTR_SIZE 4
+#endif
+
+#ifndef __ASSEMBLY__
+
+extern unsigned long kgdb_compiled_break;
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm(".global kgdb_compiled_break\n"
+ ".option norvc\n"
+ "kgdb_compiled_break: ebreak\n"
+ ".option rvc\n");
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define DBG_REG_ZERO "zero"
+#define DBG_REG_RA "ra"
+#define DBG_REG_SP "sp"
+#define DBG_REG_GP "gp"
+#define DBG_REG_TP "tp"
+#define DBG_REG_T0 "t0"
+#define DBG_REG_T1 "t1"
+#define DBG_REG_T2 "t2"
+#define DBG_REG_FP "fp"
+#define DBG_REG_S1 "s1"
+#define DBG_REG_A0 "a0"
+#define DBG_REG_A1 "a1"
+#define DBG_REG_A2 "a2"
+#define DBG_REG_A3 "a3"
+#define DBG_REG_A4 "a4"
+#define DBG_REG_A5 "a5"
+#define DBG_REG_A6 "a6"
+#define DBG_REG_A7 "a7"
+#define DBG_REG_S2 "s2"
+#define DBG_REG_S3 "s3"
+#define DBG_REG_S4 "s4"
+#define DBG_REG_S5 "s5"
+#define DBG_REG_S6 "s6"
+#define DBG_REG_S7 "s7"
+#define DBG_REG_S8 "s8"
+#define DBG_REG_S9 "s9"
+#define DBG_REG_S10 "s10"
+#define DBG_REG_S11 "s11"
+#define DBG_REG_T3 "t3"
+#define DBG_REG_T4 "t4"
+#define DBG_REG_T5 "t5"
+#define DBG_REG_T6 "t6"
+#define DBG_REG_EPC "pc"
+#define DBG_REG_STATUS "sstatus"
+#define DBG_REG_BADADDR "stval"
+#define DBG_REG_CAUSE "scause"
+
+#define DBG_REG_ZERO_OFF 0
+#define DBG_REG_RA_OFF 1
+#define DBG_REG_SP_OFF 2
+#define DBG_REG_GP_OFF 3
+#define DBG_REG_TP_OFF 4
+#define DBG_REG_T0_OFF 5
+#define DBG_REG_T1_OFF 6
+#define DBG_REG_T2_OFF 7
+#define DBG_REG_FP_OFF 8
+#define DBG_REG_S1_OFF 9
+#define DBG_REG_A0_OFF 10
+#define DBG_REG_A1_OFF 11
+#define DBG_REG_A2_OFF 12
+#define DBG_REG_A3_OFF 13
+#define DBG_REG_A4_OFF 14
+#define DBG_REG_A5_OFF 15
+#define DBG_REG_A6_OFF 16
+#define DBG_REG_A7_OFF 17
+#define DBG_REG_S2_OFF 18
+#define DBG_REG_S3_OFF 19
+#define DBG_REG_S4_OFF 20
+#define DBG_REG_S5_OFF 21
+#define DBG_REG_S6_OFF 22
+#define DBG_REG_S7_OFF 23
+#define DBG_REG_S8_OFF 24
+#define DBG_REG_S9_OFF 25
+#define DBG_REG_S10_OFF 26
+#define DBG_REG_S11_OFF 27
+#define DBG_REG_T3_OFF 28
+#define DBG_REG_T4_OFF 29
+#define DBG_REG_T5_OFF 30
+#define DBG_REG_T6_OFF 31
+#define DBG_REG_EPC_OFF 32
+#define DBG_REG_STATUS_OFF 33
+#define DBG_REG_BADADDR_OFF 34
+#define DBG_REG_CAUSE_OFF 35
+
+extern const char riscv_gdb_stub_feature[64];
+
+#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
+
+#endif
+#endif
diff --git a/riscv/include/asm/kprobes.h b/riscv/include/asm/kprobes.h
new file mode 100644
index 0000000..78ea44f
--- /dev/null
+++ b/riscv/include/asm/kprobes.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copied from arch/arm64/include/asm/kprobes.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_KPROBES_H
+#define _ASM_RISCV_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#ifdef CONFIG_KPROBES
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 2
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+#include <asm/probes.h>
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ unsigned long saved_status;
+ struct prev_kprobe prev_kprobe;
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
+bool kprobe_breakpoint_handler(struct pt_regs *regs);
+bool kprobe_single_step_handler(struct pt_regs *regs);
+#else
+static inline bool kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline bool kprobe_single_step_handler(struct pt_regs *regs)
+{
+ return false;
+}
+#endif /* CONFIG_KPROBES */
+#endif /* _ASM_RISCV_KPROBES_H */
diff --git a/riscv/include/asm/kvm_aia.h b/riscv/include/asm/kvm_aia.h
new file mode 100644
index 0000000..1f37b60
--- /dev/null
+++ b/riscv/include/asm/kvm_aia.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ * Anup Patel <apatel@ventanamicro.com>
+ */
+
+#ifndef __KVM_RISCV_AIA_H
+#define __KVM_RISCV_AIA_H
+
+#include <linux/jump_label.h>
+#include <linux/kvm_types.h>
+#include <asm/csr.h>
+
+struct kvm_aia {
+ /* In-kernel irqchip created */
+ bool in_kernel;
+
+ /* In-kernel irqchip initialized */
+ bool initialized;
+
+ /* Virtualization mode (Emulation, HW Accelerated, or Auto) */
+ u32 mode;
+
+ /* Number of MSIs */
+ u32 nr_ids;
+
+ /* Number of wired IRQs */
+ u32 nr_sources;
+
+ /* Number of group bits in IMSIC address */
+ u32 nr_group_bits;
+
+ /* Position of group bits in IMSIC address */
+ u32 nr_group_shift;
+
+ /* Number of hart bits in IMSIC address */
+ u32 nr_hart_bits;
+
+ /* Number of guest bits in IMSIC address */
+ u32 nr_guest_bits;
+
+ /* Guest physical address of APLIC */
+ gpa_t aplic_addr;
+
+ /* Internal state of APLIC */
+ void *aplic_state;
+};
+
+struct kvm_vcpu_aia_csr {
+ unsigned long vsiselect;
+ unsigned long hviprio1;
+ unsigned long hviprio2;
+ unsigned long vsieh;
+ unsigned long hviph;
+ unsigned long hviprio1h;
+ unsigned long hviprio2h;
+};
+
+struct kvm_vcpu_aia {
+ /* CPU AIA CSR context of Guest VCPU */
+ struct kvm_vcpu_aia_csr guest_csr;
+
+ /* CPU AIA CSR context upon Guest VCPU reset */
+ struct kvm_vcpu_aia_csr guest_reset_csr;
+
+ /* Guest physical address of IMSIC for this VCPU */
+ gpa_t imsic_addr;
+
+ /* HART index of IMSIC extacted from guest physical address */
+ u32 hart_index;
+
+ /* Internal state of IMSIC for this VCPU */
+ void *imsic_state;
+};
+
+#define KVM_RISCV_AIA_UNDEF_ADDR (-1)
+
+#define kvm_riscv_aia_initialized(k) ((k)->arch.aia.initialized)
+
+#define irqchip_in_kernel(k) ((k)->arch.aia.in_kernel)
+
+extern unsigned int kvm_riscv_aia_nr_hgei;
+extern unsigned int kvm_riscv_aia_max_ids;
+DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
+#define kvm_riscv_aia_available() \
+ static_branch_unlikely(&kvm_riscv_aia_available)
+
+extern struct kvm_device_ops kvm_riscv_aia_device_ops;
+
+void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);
+
+#define KVM_RISCV_AIA_IMSIC_TOPEI (ISELECT_MASK + 1)
+int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask);
+int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
+ bool write, unsigned long *val);
+int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type);
+void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
+ u32 guest_index, u32 offset, u32 iid);
+int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu);
+
+int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v);
+int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v);
+int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type);
+int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level);
+int kvm_riscv_aia_aplic_init(struct kvm *kvm);
+void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm);
+
+#ifdef CONFIG_32BIT
+void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu);
+#else
+static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
+{
+}
+static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
+
+void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long *out_val);
+int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long val);
+
+int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
+ unsigned int csr_num,
+ unsigned long *val,
+ unsigned long new_val,
+ unsigned long wr_mask);
+int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask);
+#define KVM_RISCV_VCPU_AIA_CSR_FUNCS \
+{ .base = CSR_SIREG, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
+{ .base = CSR_STOPEI, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
+
+int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu);
+
+int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
+ u32 guest_index, u32 iid);
+int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
+int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level);
+
+void kvm_riscv_aia_init_vm(struct kvm *kvm);
+void kvm_riscv_aia_destroy_vm(struct kvm *kvm);
+
+int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
+ void __iomem **hgei_va, phys_addr_t *hgei_pa);
+void kvm_riscv_aia_free_hgei(int cpu, int hgei);
+void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);
+
+void kvm_riscv_aia_enable(void);
+void kvm_riscv_aia_disable(void);
+int kvm_riscv_aia_init(void);
+void kvm_riscv_aia_exit(void);
+
+#endif
diff --git a/riscv/include/asm/kvm_aia_aplic.h b/riscv/include/asm/kvm_aia_aplic.h
new file mode 100644
index 0000000..6dd1a48
--- /dev/null
+++ b/riscv/include/asm/kvm_aia_aplic.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __KVM_RISCV_AIA_IMSIC_H
+#define __KVM_RISCV_AIA_IMSIC_H
+
+#include <linux/bitops.h>
+
+#define APLIC_MAX_IDC BIT(14)
+#define APLIC_MAX_SOURCE 1024
+
+#define APLIC_DOMAINCFG 0x0000
+#define APLIC_DOMAINCFG_RDONLY 0x80000000
+#define APLIC_DOMAINCFG_IE BIT(8)
+#define APLIC_DOMAINCFG_DM BIT(2)
+#define APLIC_DOMAINCFG_BE BIT(0)
+
+#define APLIC_SOURCECFG_BASE 0x0004
+#define APLIC_SOURCECFG_D BIT(10)
+#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff
+#define APLIC_SOURCECFG_SM_MASK 0x00000007
+#define APLIC_SOURCECFG_SM_INACTIVE 0x0
+#define APLIC_SOURCECFG_SM_DETACH 0x1
+#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4
+#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5
+#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6
+#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7
+
+#define APLIC_IRQBITS_PER_REG 32
+
+#define APLIC_SETIP_BASE 0x1c00
+#define APLIC_SETIPNUM 0x1cdc
+
+#define APLIC_CLRIP_BASE 0x1d00
+#define APLIC_CLRIPNUM 0x1ddc
+
+#define APLIC_SETIE_BASE 0x1e00
+#define APLIC_SETIENUM 0x1edc
+
+#define APLIC_CLRIE_BASE 0x1f00
+#define APLIC_CLRIENUM 0x1fdc
+
+#define APLIC_SETIPNUM_LE 0x2000
+#define APLIC_SETIPNUM_BE 0x2004
+
+#define APLIC_GENMSI 0x3000
+
+#define APLIC_TARGET_BASE 0x3004
+#define APLIC_TARGET_HART_IDX_SHIFT 18
+#define APLIC_TARGET_HART_IDX_MASK 0x3fff
+#define APLIC_TARGET_GUEST_IDX_SHIFT 12
+#define APLIC_TARGET_GUEST_IDX_MASK 0x3f
+#define APLIC_TARGET_IPRIO_MASK 0xff
+#define APLIC_TARGET_EIID_MASK 0x7ff
+
+#endif
diff --git a/riscv/include/asm/kvm_aia_imsic.h b/riscv/include/asm/kvm_aia_imsic.h
new file mode 100644
index 0000000..da5881d
--- /dev/null
+++ b/riscv/include/asm/kvm_aia_imsic.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __KVM_RISCV_AIA_IMSIC_H
+#define __KVM_RISCV_AIA_IMSIC_H
+
+#include <linux/types.h>
+#include <asm/csr.h>
+
+#define IMSIC_MMIO_PAGE_SHIFT 12
+#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT)
+#define IMSIC_MMIO_PAGE_LE 0x00
+#define IMSIC_MMIO_PAGE_BE 0x04
+
+#define IMSIC_MIN_ID 63
+#define IMSIC_MAX_ID 2048
+
+#define IMSIC_EIDELIVERY 0x70
+
+#define IMSIC_EITHRESHOLD 0x72
+
+#define IMSIC_EIP0 0x80
+#define IMSIC_EIP63 0xbf
+#define IMSIC_EIPx_BITS 32
+
+#define IMSIC_EIE0 0xc0
+#define IMSIC_EIE63 0xff
+#define IMSIC_EIEx_BITS 32
+
+#define IMSIC_FIRST IMSIC_EIDELIVERY
+#define IMSIC_LAST IMSIC_EIE63
+
+#define IMSIC_MMIO_SETIPNUM_LE 0x00
+#define IMSIC_MMIO_SETIPNUM_BE 0x04
+
+#endif
diff --git a/riscv/include/asm/kvm_host.h b/riscv/include/asm/kvm_host.h
new file mode 100644
index 0000000..0eefd9c
--- /dev/null
+++ b/riscv/include/asm/kvm_host.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#ifndef __RISCV_KVM_HOST_H__
+#define __RISCV_KVM_HOST_H__
+
+#include <linux/types.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/spinlock.h>
+#include <asm/hwcap.h>
+#include <asm/kvm_aia.h>
+#include <asm/ptrace.h>
+#include <asm/kvm_vcpu_fp.h>
+#include <asm/kvm_vcpu_insn.h>
+#include <asm/kvm_vcpu_sbi.h>
+#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_pmu.h>
+
+#define KVM_MAX_VCPUS 1024
+
+#define KVM_HALT_POLL_NS_DEFAULT 500000
+
+#define KVM_VCPU_MAX_FEATURES 0
+
+#define KVM_IRQCHIP_NUM_PINS 1024
+
+#define KVM_REQ_SLEEP \
+ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(1)
+#define KVM_REQ_UPDATE_HGATP KVM_ARCH_REQ(2)
+#define KVM_REQ_FENCE_I \
+ KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HFENCE_GVMA_VMID_ALL KVM_REQ_TLB_FLUSH
+#define KVM_REQ_HFENCE_VVMA_ALL \
+ KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HFENCE \
+ KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+
+enum kvm_riscv_hfence_type {
+ KVM_RISCV_HFENCE_UNKNOWN = 0,
+ KVM_RISCV_HFENCE_GVMA_VMID_GPA,
+ KVM_RISCV_HFENCE_VVMA_ASID_GVA,
+ KVM_RISCV_HFENCE_VVMA_ASID_ALL,
+ KVM_RISCV_HFENCE_VVMA_GVA,
+};
+
+struct kvm_riscv_hfence {
+ enum kvm_riscv_hfence_type type;
+ unsigned long asid;
+ unsigned long order;
+ gpa_t addr;
+ gpa_t size;
+};
+
+#define KVM_RISCV_VCPU_MAX_HFENCE 64
+
+struct kvm_vm_stat {
+ struct kvm_vm_stat_generic generic;
+};
+
+struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
+ u64 ecall_exit_stat;
+ u64 wfi_exit_stat;
+ u64 mmio_exit_user;
+ u64 mmio_exit_kernel;
+ u64 csr_exit_user;
+ u64 csr_exit_kernel;
+ u64 signal_exits;
+ u64 exits;
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_vmid {
+ /*
+ * Writes to vmid_version and vmid happen with vmid_lock held
+ * whereas reads happen without any lock held.
+ */
+ unsigned long vmid_version;
+ unsigned long vmid;
+};
+
+struct kvm_arch {
+ /* G-stage vmid */
+ struct kvm_vmid vmid;
+
+ /* G-stage page table */
+ pgd_t *pgd;
+ phys_addr_t pgd_phys;
+
+ /* Guest Timer */
+ struct kvm_guest_timer timer;
+
+ /* AIA Guest/VM context */
+ struct kvm_aia aia;
+};
+
+struct kvm_cpu_trap {
+ unsigned long sepc;
+ unsigned long scause;
+ unsigned long stval;
+ unsigned long htval;
+ unsigned long htinst;
+};
+
+struct kvm_cpu_context {
+ unsigned long zero;
+ unsigned long ra;
+ unsigned long sp;
+ unsigned long gp;
+ unsigned long tp;
+ unsigned long t0;
+ unsigned long t1;
+ unsigned long t2;
+ unsigned long s0;
+ unsigned long s1;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long s2;
+ unsigned long s3;
+ unsigned long s4;
+ unsigned long s5;
+ unsigned long s6;
+ unsigned long s7;
+ unsigned long s8;
+ unsigned long s9;
+ unsigned long s10;
+ unsigned long s11;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+ unsigned long sepc;
+ unsigned long sstatus;
+ unsigned long hstatus;
+ union __riscv_fp_state fp;
+ struct __riscv_v_ext_state vector;
+};
+
+struct kvm_vcpu_csr {
+ unsigned long vsstatus;
+ unsigned long vsie;
+ unsigned long vstvec;
+ unsigned long vsscratch;
+ unsigned long vsepc;
+ unsigned long vscause;
+ unsigned long vstval;
+ unsigned long hvip;
+ unsigned long vsatp;
+ unsigned long scounteren;
+ unsigned long senvcfg;
+};
+
+struct kvm_vcpu_config {
+ u64 henvcfg;
+ u64 hstateen0;
+};
+
+struct kvm_vcpu_smstateen_csr {
+ unsigned long sstateen0;
+};
+
+struct kvm_vcpu_arch {
+ /* VCPU ran at least once */
+ bool ran_atleast_once;
+
+ /* Last Host CPU on which Guest VCPU exited */
+ int last_exit_cpu;
+
+ /* ISA feature bits (similar to MISA) */
+ DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);
+
+ /* Vendor, Arch, and Implementation details */
+ unsigned long mvendorid;
+ unsigned long marchid;
+ unsigned long mimpid;
+
+ /* SSCRATCH, STVEC, and SCOUNTEREN of Host */
+ unsigned long host_sscratch;
+ unsigned long host_stvec;
+ unsigned long host_scounteren;
+ unsigned long host_senvcfg;
+ unsigned long host_sstateen0;
+
+ /* CPU context of Host */
+ struct kvm_cpu_context host_context;
+
+ /* CPU context of Guest VCPU */
+ struct kvm_cpu_context guest_context;
+
+ /* CPU CSR context of Guest VCPU */
+ struct kvm_vcpu_csr guest_csr;
+
+ /* CPU Smstateen CSR context of Guest VCPU */
+ struct kvm_vcpu_smstateen_csr smstateen_csr;
+
+ /* CPU context upon Guest VCPU reset */
+ struct kvm_cpu_context guest_reset_context;
+
+ /* CPU CSR context upon Guest VCPU reset */
+ struct kvm_vcpu_csr guest_reset_csr;
+
+ /*
+ * VCPU interrupts
+ *
+ * We have a lockless approach for tracking pending VCPU interrupts
+ * implemented using atomic bitops. The irqs_pending bitmap represent
+ * pending interrupts whereas irqs_pending_mask represent bits changed
+ * in irqs_pending. Our approach is modeled around multiple producer
+ * and single consumer problem where the consumer is the VCPU itself.
+ */
+#define KVM_RISCV_VCPU_NR_IRQS 64
+ DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
+ DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
+
+ /* VCPU Timer */
+ struct kvm_vcpu_timer timer;
+
+ /* HFENCE request queue */
+ spinlock_t hfence_lock;
+ unsigned long hfence_head;
+ unsigned long hfence_tail;
+ struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];
+
+ /* MMIO instruction details */
+ struct kvm_mmio_decode mmio_decode;
+
+ /* CSR instruction details */
+ struct kvm_csr_decode csr_decode;
+
+ /* SBI context */
+ struct kvm_vcpu_sbi_context sbi_context;
+
+ /* AIA VCPU context */
+ struct kvm_vcpu_aia aia_context;
+
+ /* Cache pages needed to program page tables with spinlock held */
+ struct kvm_mmu_memory_cache mmu_page_cache;
+
+ /* VCPU power-off state */
+ bool power_off;
+
+ /* Don't run the VCPU (blocked) */
+ bool pause;
+
+ /* Performance monitoring context */
+ struct kvm_pmu pmu_context;
+
+ /* 'static' configurations which are set only once */
+ struct kvm_vcpu_config cfg;
+};
+
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+
+#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
+
+void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
+ gpa_t gpa, gpa_t gpsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
+void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_gvma_all(void);
+void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
+ unsigned long asid,
+ unsigned long gva,
+ unsigned long gvsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
+ unsigned long asid);
+void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
+ unsigned long gva, unsigned long gvsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
+
+void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);
+
+void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
+
+void kvm_riscv_fence_i(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask);
+void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ gpa_t gpa, gpa_t gpsz,
+ unsigned long order);
+void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask);
+void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ unsigned long gva, unsigned long gvsz,
+ unsigned long order, unsigned long asid);
+void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ unsigned long asid);
+void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ unsigned long gva, unsigned long gvsz,
+ unsigned long order);
+void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask);
+
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+ phys_addr_t hpa, unsigned long size,
+ bool writable, bool in_atomic);
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
+ unsigned long size);
+int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
+ struct kvm_memory_slot *memslot,
+ gpa_t gpa, unsigned long hva, bool is_write);
+int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
+void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
+void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
+void __init kvm_riscv_gstage_mode_detect(void);
+unsigned long __init kvm_riscv_gstage_mode(void);
+int kvm_riscv_gstage_gpa_bits(void);
+
+void __init kvm_riscv_gstage_vmid_detect(void);
+unsigned long kvm_riscv_gstage_vmid_bits(void);
+int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
+bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
+void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
+
+int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);
+
+void __kvm_riscv_unpriv_trap(void);
+
+unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
+ bool read_insn,
+ unsigned long guest_addr,
+ struct kvm_cpu_trap *trap);
+void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
+ struct kvm_cpu_trap *trap);
+int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_cpu_trap *trap);
+
+void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);
+
+void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
+unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+ u64 __user *uindices);
+int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+
+int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
+int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
+void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
+void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
+
+#endif /* __RISCV_KVM_HOST_H__ */
diff --git a/riscv/include/asm/kvm_types.h b/riscv/include/asm/kvm_types.h
new file mode 100644
index 0000000..e15765f
--- /dev/null
+++ b/riscv/include/asm/kvm_types.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_KVM_TYPES_H
+#define _ASM_RISCV_KVM_TYPES_H
+
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 32
+
+#endif /* _ASM_RISCV_KVM_TYPES_H */
diff --git a/riscv/include/asm/kvm_vcpu_fp.h b/riscv/include/asm/kvm_vcpu_fp.h
new file mode 100644
index 0000000..b554014
--- /dev/null
+++ b/riscv/include/asm/kvm_vcpu_fp.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_FP_H
+#define __KVM_VCPU_RISCV_FP_H
+
+#include <linux/types.h>
+
+struct kvm_cpu_context;
+
+#ifdef CONFIG_FPU
+void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context);
+void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context);
+void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context);
+void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context);
+
+void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
+ const unsigned long *isa);
+void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
+ const unsigned long *isa);
+void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx);
+void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx);
+#else
+static inline void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
+{
+}
+static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
+ const unsigned long *isa)
+{
+}
+static inline void kvm_riscv_vcpu_guest_fp_restore(
+ struct kvm_cpu_context *cntx,
+ const unsigned long *isa)
+{
+}
+static inline void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
+{
+}
+static inline void kvm_riscv_vcpu_host_fp_restore(
+ struct kvm_cpu_context *cntx)
+{
+}
+#endif
+
+int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg,
+ unsigned long rtype);
+int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg,
+ unsigned long rtype);
+
+#endif
diff --git a/riscv/include/asm/kvm_vcpu_insn.h b/riscv/include/asm/kvm_vcpu_insn.h
new file mode 100644
index 0000000..350011c
--- /dev/null
+++ b/riscv/include/asm/kvm_vcpu_insn.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef __KVM_VCPU_RISCV_INSN_H
+#define __KVM_VCPU_RISCV_INSN_H
+
+struct kvm_vcpu;
+struct kvm_run;
+struct kvm_cpu_trap;
+
+struct kvm_mmio_decode {
+ unsigned long insn;
+ int insn_len;
+ int len;
+ int shift;
+ int return_handled;
+};
+
+struct kvm_csr_decode {
+ unsigned long insn;
+ int return_handled;
+};
+
+/* Return values used by function emulating a particular instruction */
+enum kvm_insn_return {
+ KVM_INSN_EXIT_TO_USER_SPACE = 0,
+ KVM_INSN_CONTINUE_NEXT_SEPC,
+ KVM_INSN_CONTINUE_SAME_SEPC,
+ KVM_INSN_ILLEGAL_TRAP,
+ KVM_INSN_VIRTUAL_TRAP
+};
+
+void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_cpu_trap *trap);
+
+int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long fault_addr,
+ unsigned long htinst);
+int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long fault_addr,
+ unsigned long htinst);
+int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+#endif
diff --git a/riscv/include/asm/kvm_vcpu_pmu.h b/riscv/include/asm/kvm_vcpu_pmu.h
new file mode 100644
index 0000000..395518a
--- /dev/null
+++ b/riscv/include/asm/kvm_vcpu_pmu.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Rivos Inc
+ *
+ * Authors:
+ * Atish Patra <atishp@rivosinc.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_PMU_H
+#define __KVM_VCPU_RISCV_PMU_H
+
+#include <linux/perf/riscv_pmu.h>
+#include <asm/sbi.h>
+
+#ifdef CONFIG_RISCV_PMU_SBI
+#define RISCV_KVM_MAX_FW_CTRS 32
+#define RISCV_KVM_MAX_HW_CTRS 32
+#define RISCV_KVM_MAX_COUNTERS (RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
+static_assert(RISCV_KVM_MAX_COUNTERS <= 64);
+
+struct kvm_fw_event {
+ /* Current value of the event */
+ unsigned long value;
+
+ /* Event monitoring status */
+ bool started;
+};
+
+/* Per virtual pmu counter data */
+struct kvm_pmc {
+ u8 idx;
+ struct perf_event *perf_event;
+ u64 counter_val;
+ union sbi_pmu_ctr_info cinfo;
+ /* Event monitoring status */
+ bool started;
+ /* Monitoring event ID */
+ unsigned long event_idx;
+};
+
+/* PMU data structure per vcpu */
+struct kvm_pmu {
+ struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
+ struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
+ /* Number of the virtual firmware counters available */
+ int num_fw_ctrs;
+ /* Number of the virtual hardware counters available */
+ int num_hw_ctrs;
+ /* A flag to indicate that pmu initialization is done */
+ bool init_done;
+ /* Bit map of all the virtual counter used */
+ DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
+};
+
+#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
+#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu_context))
+
+#if defined(CONFIG_32BIT)
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
+{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+#else
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+#endif
+
+int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
+int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask);
+
+int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags, u64 ival,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags,
+ unsigned long eidx, u64 evtdata,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
+ struct kvm_vcpu_sbi_return *retdata);
+void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
+
+#else
+struct kvm_pmu {
+};
+
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = 0, .count = 0, .func = NULL },
+
+static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
+static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
+{
+ return 0;
+}
+
+static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
+#endif /* CONFIG_RISCV_PMU_SBI */
+#endif /* !__KVM_VCPU_RISCV_PMU_H */
diff --git a/riscv/include/asm/kvm_vcpu_sbi.h b/riscv/include/asm/kvm_vcpu_sbi.h
new file mode 100644
index 0000000..6a453f7
--- /dev/null
+++ b/riscv/include/asm/kvm_vcpu_sbi.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/**
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __RISCV_KVM_VCPU_SBI_H__
+#define __RISCV_KVM_VCPU_SBI_H__
+
+#define KVM_SBI_IMPID 3
+
+#define KVM_SBI_VERSION_MAJOR 2
+#define KVM_SBI_VERSION_MINOR 0
+
+enum kvm_riscv_sbi_ext_status {
+ KVM_RISCV_SBI_EXT_UNINITIALIZED,
+ KVM_RISCV_SBI_EXT_AVAILABLE,
+ KVM_RISCV_SBI_EXT_UNAVAILABLE,
+};
+
+struct kvm_vcpu_sbi_context {
+ int return_handled;
+ enum kvm_riscv_sbi_ext_status ext_status[KVM_RISCV_SBI_EXT_MAX];
+};
+
+struct kvm_vcpu_sbi_return {
+ unsigned long out_val;
+ unsigned long err_val;
+ struct kvm_cpu_trap *utrap;
+ bool uexit;
+};
+
+struct kvm_vcpu_sbi_extension {
+ unsigned long extid_start;
+ unsigned long extid_end;
+
+ bool default_unavail;
+
+ /**
+ * SBI extension handler. It can be defined for a given extension or group of
+ * extension. But it should always return linux error codes rather than SBI
+ * specific error codes.
+ */
+ int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_vcpu_sbi_return *retdata);
+
+ /* Extension specific probe function */
+ unsigned long (*probe)(struct kvm_vcpu *vcpu);
+};
+
+void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
+void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
+ struct kvm_run *run,
+ u32 type, u64 flags);
+int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
+ struct kvm_vcpu *vcpu, unsigned long extid);
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
+void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_RISCV_SBI_V01
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
+#endif
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
+
+#ifdef CONFIG_RISCV_PMU_SBI
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
+#endif
+#endif /* __RISCV_KVM_VCPU_SBI_H__ */
diff --git a/riscv/include/asm/kvm_vcpu_timer.h b/riscv/include/asm/kvm_vcpu_timer.h
new file mode 100644
index 0000000..82f7260
--- /dev/null
+++ b/riscv/include/asm/kvm_vcpu_timer.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_TIMER_H
+#define __KVM_VCPU_RISCV_TIMER_H
+
+#include <linux/hrtimer.h>
+
+struct kvm_guest_timer {
+ /* Mult & Shift values to get nanoseconds from cycles */
+ u32 nsec_mult;
+ u32 nsec_shift;
+ /* Time delta value */
+ u64 time_delta;
+};
+
+struct kvm_vcpu_timer {
+ /* Flag for whether init is done */
+ bool init_done;
+ /* Flag for whether timer event is configured */
+ bool next_set;
+ /* Next timer event cycles */
+ u64 next_cycles;
+ /* Underlying hrtimer instance */
+ struct hrtimer hrt;
+
+ /* Flag to check if sstc is enabled or not */
+ bool sstc_enabled;
+ /* A function pointer to switch between stimecmp or hrtimer at runtime */
+ int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles);
+};
+
+int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles);
+int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
+void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
+
+#endif
diff --git a/riscv/include/asm/kvm_vcpu_vector.h b/riscv/include/asm/kvm_vcpu_vector.h
new file mode 100644
index 0000000..27f5bcc
--- /dev/null
+++ b/riscv/include/asm/kvm_vcpu_vector.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 SiFive
+ *
+ * Authors:
+ * Vincent Chen <vincent.chen@sifive.com>
+ * Greentime Hu <greentime.hu@sifive.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_VECTOR_H
+#define __KVM_VCPU_RISCV_VECTOR_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_RISCV_ISA_V
+#include <asm/vector.h>
+#include <asm/kvm_host.h>
+
+static __always_inline void __kvm_riscv_vector_save(struct kvm_cpu_context *context)
+{
+ __riscv_v_vstate_save(&context->vector, context->vector.datap);
+}
+
+static __always_inline void __kvm_riscv_vector_restore(struct kvm_cpu_context *context)
+{
+ __riscv_v_vstate_restore(&context->vector, context->vector.datap);
+}
+
+void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
+ unsigned long *isa);
+void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
+ unsigned long *isa);
+void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
+void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
+ struct kvm_cpu_context *cntx);
+void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu);
+#else
+
+struct kvm_cpu_context;
+
+static inline void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
+ unsigned long *isa)
+{
+}
+
+static inline void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
+ unsigned long *isa)
+{
+}
+
+static inline void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
+{
+}
+
+static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
+{
+}
+
+static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
+ struct kvm_cpu_context *cntx)
+{
+ return 0;
+}
+
+static inline void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
+int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+#endif
diff --git a/riscv/include/asm/linkage.h b/riscv/include/asm/linkage.h
new file mode 100644
index 0000000..9e88ba2
--- /dev/null
+++ b/riscv/include/asm/linkage.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_LINKAGE_H
+#define _ASM_RISCV_LINKAGE_H
+
+#define __ALIGN .balign 4
+#define __ALIGN_STR ".balign 4"
+
+#endif /* _ASM_RISCV_LINKAGE_H */
diff --git a/riscv/include/asm/mmio.h b/riscv/include/asm/mmio.h
new file mode 100644
index 0000000..4c58ee7
--- /dev/null
+++ b/riscv/include/asm/mmio.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
+ * which was based on arch/arm/include/io.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_MMIO_H
+#define _ASM_RISCV_MMIO_H
+
+#include <linux/types.h>
+#include <asm/mmiowb.h>
+
+/* Generic IO read/write. These perform native-endian accesses. */
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+{
+ asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+ asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+{
+ asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+#endif
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+#endif
+
+/*
+ * Unordered I/O memory access primitives. These are even more relaxed than
+ * the relaxed versions, as they don't even order accesses between successive
+ * operations to the I/O regions.
+ */
+#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
+#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
+#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+
+#define writeb_cpu(v, c) ((void)__raw_writeb((v), (c)))
+#define writew_cpu(v, c) ((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
+#define writel_cpu(v, c) ((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))
+
+#ifdef CONFIG_64BIT
+#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
+#define writeq_cpu(v, c) ((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
+#endif
+
+/*
+ * Relaxed I/O memory access primitives. These follow the Device memory
+ * ordering rules but do not guarantee any ordering relative to Normal memory
+ * accesses. These are defined to order the indicated access (either a read or
+ * write) with all other I/O memory accesses to the same peripheral. Since the
+ * platform specification defines that all I/O regions are strongly ordered on
+ * channel 0, no explicit fences are required to enforce this ordering.
+ */
+/* FIXME: These are now the same as asm-generic */
+#define __io_rbr() do {} while (0)
+#define __io_rar() do {} while (0)
+#define __io_rbw() do {} while (0)
+#define __io_raw() do {} while (0)
+
+#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
+#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
+#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
+
+#define writeb_relaxed(v, c) ({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
+#define writew_relaxed(v, c) ({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
+#define writel_relaxed(v, c) ({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })
+
+#ifdef CONFIG_64BIT
+#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
+#define writeq_relaxed(v, c) ({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
+#endif
+
+/*
+ * I/O memory access primitives. Reads are ordered relative to any following
+ * Normal memory read and delay() loop. Writes are ordered relative to any
+ * prior Normal memory write. The memory barriers here are necessary as RISC-V
+ * doesn't define any ordering between the memory space and the I/O space.
+ */
+#define __io_br() do {} while (0)
+#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
+#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
+#define __io_aw() mmiowb_set_pending()
+
+#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
+#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
+#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
+
+#define writeb(v, c) ({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
+#define writew(v, c) ({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
+#define writel(v, c) ({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })
+
+#ifdef CONFIG_64BIT
+#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
+#define writeq(v, c) ({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); })
+#endif
+
+#endif /* _ASM_RISCV_MMIO_H */
diff --git a/riscv/include/asm/mmiowb.h b/riscv/include/asm/mmiowb.h
new file mode 100644
index 0000000..0b2333e
--- /dev/null
+++ b/riscv/include/asm/mmiowb.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_MMIOWB_H
+#define _ASM_RISCV_MMIOWB_H
+
+/*
+ * "o,w" is sufficient to ensure that all writes to the device have completed
+ * before the write to the spinlock is allowed to commit.
+ */
+#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+
+#include <linux/smp.h>
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_RISCV_MMIOWB_H */
diff --git a/riscv/include/asm/mmu.h b/riscv/include/asm/mmu.h
new file mode 100644
index 0000000..355504b
--- /dev/null
+++ b/riscv/include/asm/mmu.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+
+#ifndef _ASM_RISCV_MMU_H
+#define _ASM_RISCV_MMU_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+#ifndef CONFIG_MMU
+ unsigned long end_brk;
+#else
+ atomic_long_t id;
+#endif
+ void *vdso;
+#ifdef CONFIG_SMP
+ /* A local icache flush is needed before user execution can resume. */
+ cpumask_t icache_stale_mask;
+#endif
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
+} mm_context_t;
+
+void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
+ phys_addr_t sz, pgprot_t prot);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_MMU_H */
diff --git a/riscv/include/asm/mmu_context.h b/riscv/include/asm/mmu_context.h
new file mode 100644
index 0000000..7030837
--- /dev/null
+++ b/riscv/include/asm/mmu_context.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_MMU_CONTEXT_H
+#define _ASM_RISCV_MMU_CONTEXT_H
+
+#include <linux/mm_types.h>
+#include <asm-generic/mm_hooks.h>
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *task);
+
+#define activate_mm activate_mm
+static inline void activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+{
+ switch_mm(prev, next, NULL);
+}
+
+#define init_new_context init_new_context
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+#ifdef CONFIG_MMU
+ atomic_long_set(&mm->context.id, 0);
+#endif
+ return 0;
+}
+
+DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/riscv/include/asm/mmzone.h b/riscv/include/asm/mmzone.h
new file mode 100644
index 0000000..fa17e01
--- /dev/null
+++ b/riscv/include/asm/mmzone.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMZONE_H
+#define __ASM_MMZONE_H
+
+#ifdef CONFIG_NUMA
+
+#include <asm/numa.h>
+
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[(nid)])
+
+#endif /* CONFIG_NUMA */
+#endif /* __ASM_MMZONE_H */
diff --git a/riscv/include/asm/module.h b/riscv/include/asm/module.h
new file mode 100644
index 0000000..0f3baaa
--- /dev/null
+++ b/riscv/include/asm/module.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#ifndef _ASM_RISCV_MODULE_H
+#define _ASM_RISCV_MODULE_H
+
+#include <asm-generic/module.h>
+#include <linux/elf.h>
+
+struct module;
+unsigned long module_emit_got_entry(struct module *mod, unsigned long val);
+unsigned long module_emit_plt_entry(struct module *mod, unsigned long val);
+
+#ifdef CONFIG_MODULE_SECTIONS
+struct mod_section {
+ Elf_Shdr *shdr;
+ int num_entries;
+ int max_entries;
+};
+
+struct mod_arch_specific {
+ struct mod_section got;
+ struct mod_section plt;
+ struct mod_section got_plt;
+};
+
+struct got_entry {
+ unsigned long symbol_addr; /* the real variable address */
+};
+
+static inline struct got_entry emit_got_entry(unsigned long val)
+{
+ return (struct got_entry) {val};
+}
+
+static inline struct got_entry *get_got_entry(unsigned long val,
+ const struct mod_section *sec)
+{
+ struct got_entry *got = (struct got_entry *)(sec->shdr->sh_addr);
+ int i;
+ for (i = 0; i < sec->num_entries; i++) {
+ if (got[i].symbol_addr == val)
+ return &got[i];
+ }
+ return NULL;
+}
+
+struct plt_entry {
+ /*
+ * Trampoline code to real target address. The return address
+ * should be the original (pc+4) before entring plt entry.
+ */
+ u32 insn_auipc; /* auipc t0, 0x0 */
+ u32 insn_ld; /* ld t1, 0x10(t0) */
+ u32 insn_jr; /* jr t1 */
+};
+
+#define OPC_AUIPC 0x0017
+#define OPC_LD 0x3003
+#define OPC_JALR 0x0067
+#define REG_T0 0x5
+#define REG_T1 0x6
+
+static inline struct plt_entry emit_plt_entry(unsigned long val,
+ unsigned long plt,
+ unsigned long got_plt)
+{
+ /*
+ * U-Type encoding:
+ * +------------+----------+----------+
+ * | imm[31:12] | rd[11:7] | opc[6:0] |
+ * +------------+----------+----------+
+ *
+ * I-Type encoding:
+ * +------------+------------+--------+----------+----------+
+ * | imm[31:20] | rs1[19:15] | funct3 | rd[11:7] | opc[6:0] |
+ * +------------+------------+--------+----------+----------+
+ *
+ */
+ unsigned long offset = got_plt - plt;
+ u32 hi20 = (offset + 0x800) & 0xfffff000;
+ u32 lo12 = (offset - hi20);
+ return (struct plt_entry) {
+ OPC_AUIPC | (REG_T0 << 7) | hi20,
+ OPC_LD | (lo12 << 20) | (REG_T0 << 15) | (REG_T1 << 7),
+ OPC_JALR | (REG_T1 << 15)
+ };
+}
+
+static inline int get_got_plt_idx(unsigned long val, const struct mod_section *sec)
+{
+ struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr;
+ int i;
+ for (i = 0; i < sec->num_entries; i++) {
+ if (got_plt[i].symbol_addr == val)
+ return i;
+ }
+ return -1;
+}
+
+static inline struct plt_entry *get_plt_entry(unsigned long val,
+ const struct mod_section *sec_plt,
+ const struct mod_section *sec_got_plt)
+{
+ struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
+ int got_plt_idx = get_got_plt_idx(val, sec_got_plt);
+ if (got_plt_idx >= 0)
+ return plt + got_plt_idx;
+ else
+ return NULL;
+}
+
+#endif /* CONFIG_MODULE_SECTIONS */
+
+static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+{
+ const Elf_Shdr *s, *se;
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ if (strcmp(name, secstrs + s->sh_name) == 0)
+ return s;
+ }
+
+ return NULL;
+}
+
+#endif /* _ASM_RISCV_MODULE_H */
diff --git a/riscv/include/asm/module.lds.h b/riscv/include/asm/module.lds.h
new file mode 100644
index 0000000..1075bea
--- /dev/null
+++ b/riscv/include/asm/module.lds.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+#ifdef CONFIG_MODULE_SECTIONS
+SECTIONS {
+ .plt : { BYTE(0) }
+ .got : { BYTE(0) }
+ .got.plt : { BYTE(0) }
+}
+#endif
diff --git a/riscv/include/asm/numa.h b/riscv/include/asm/numa.h
new file mode 100644
index 0000000..8c8cf42
--- /dev/null
+++ b/riscv/include/asm/numa.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_NUMA_H
+#define __ASM_NUMA_H
+
+#include <asm/topology.h>
+#include <asm-generic/numa.h>
+
+#endif /* __ASM_NUMA_H */
diff --git a/riscv/include/asm/page.h b/riscv/include/asm/page.h
new file mode 100644
index 0000000..96a08a4
--- /dev/null
+++ b/riscv/include/asm/page.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
+ */
+
+#ifndef _ASM_RISCV_PAGE_H
+#define _ASM_RISCV_PAGE_H
+
+//#include <linux/pfn.h>
+//#include <linux/const.h>
+
+#define PAGE_SHIFT (12)
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+/*
+ * PAGE_OFFSET -- the first address of the first page of memory.
+ * When not using MMU this corresponds to the first free page in
+ * physical memory (aligned on a page boundary).
+ */
+#ifdef CONFIG_64BIT
+#ifdef CONFIG_MMU
+#define PAGE_OFFSET kernel_map.page_offset
+#else
+#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#endif
+/*
+ * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
+ * define the PAGE_OFFSET value for SV48 and SV39.
+ */
+#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
+#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)
+#else
+#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#endif /* CONFIG_64BIT */
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_RISCV_ISA_ZICBOZ
+void clear_page(void *page);
+#else
+#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
+#endif
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(pgaddr, vaddr, page) clear_page(pgaddr)
+#define copy_user_page(vto, vfrom, vaddr, topg) \
+ memcpy((vto), (vfrom), PAGE_SIZE)
+
+/*
+ * Use struct definitions to apply C type checking
+ */
+
+/* Page Global Directory entry */
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+
+/* Page Table entry */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#ifdef CONFIG_64BIT
+#define PTE_FMT "%016lx"
+#else
+#define PTE_FMT "%08lx"
+#endif
+
+#ifdef CONFIG_64BIT
+/*
+ * We override this value as its generic definition uses __pa too early in
+ * the boot process (before kernel_map.va_pa_offset is set).
+ */
+#define MIN_MEMBLOCK_ADDR 0
+#endif
+
+#ifdef CONFIG_MMU
+#define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base))
+#else
+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+#endif /* CONFIG_MMU */
+
+struct kernel_mapping {
+ unsigned long page_offset;
+ unsigned long virt_addr;
+ unsigned long virt_offset;
+ uintptr_t phys_addr;
+ uintptr_t size;
+ /* Offset between linear mapping virtual address and kernel load address */
+ unsigned long va_pa_offset;
+ /* Offset between kernel mapping virtual address and kernel load address */
+ unsigned long va_kernel_pa_offset;
+ unsigned long va_kernel_xip_pa_offset;
+#ifdef CONFIG_XIP_KERNEL
+ uintptr_t xiprom;
+ uintptr_t xiprom_sz;
+#endif
+};
+
+extern struct kernel_mapping kernel_map;
+extern phys_addr_t phys_ram_base;
+
+#define is_kernel_mapping(x) \
+ ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
+
+#define is_linear_mapping(x) \
+ ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))
+
+#ifndef CONFIG_DEBUG_VIRTUAL
+#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
+#else
+void *linear_mapping_pa_to_va(unsigned long x);
+#endif
+#define kernel_mapping_pa_to_va(y) ({ \
+ unsigned long _y = (unsigned long)(y); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
+ (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \
+ (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
+ })
+#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
+
+#ifndef CONFIG_DEBUG_VIRTUAL
+#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
+#else
+phys_addr_t linear_mapping_va_to_pa(unsigned long x);
+#endif
+#define kernel_mapping_va_to_pa(y) ({ \
+ unsigned long _y = (unsigned long)(y); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
+ (_y - kernel_map.va_kernel_xip_pa_offset) : \
+ (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
+ })
+
+#define __va_to_pa_nodebug(x) ({ \
+ unsigned long _x = x; \
+ is_linear_mapping(_x) ? \
+ linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
+ })
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __virt_to_phys(unsigned long x);
+extern phys_addr_t __phys_addr_symbol(unsigned long x);
+#else
+#define __virt_to_phys(x) __va_to_pa_nodebug(x)
+#define __phys_addr_symbol(x) __va_to_pa_nodebug(x)
+#endif /* CONFIG_DEBUG_VIRTUAL */
+
+#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
+#define __pa(x) __virt_to_phys((unsigned long)(x))
+#define __va(x) ((void *)__pa_to_va_nodebug((phys_addr_t)(x)))
+
+#define phys_to_pfn(phys) (PFN_DOWN(phys))
+#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
+
+#define virt_to_pfn(vaddr) (phys_to_pfn(__pa(vaddr)))
+#define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
+
+#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
+#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
+
+#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
+
+#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
+
+unsigned long kaslr_offset(void);
+
+#endif /* __ASSEMBLY__ */
+
+#define virt_addr_valid(vaddr) ({ \
+ unsigned long _addr = (unsigned long)vaddr; \
+ (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
+
+//#include <asm-generic/memory_model.h>
+//#include <asm-generic/getorder.h>
+
+#endif /* _ASM_RISCV_PAGE_H */
diff --git a/riscv/include/asm/page.h~ b/riscv/include/asm/page.h~
new file mode 100644
index 0000000..f5a42d6
--- /dev/null
+++ b/riscv/include/asm/page.h~
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
+ */
+
+#ifndef _ASM_RISCV_PAGE_H
+#define _ASM_RISCV_PAGE_H
+
+//#include <linux/pfn.h>
+//#include <linux/const.h>
+
+#define PAGE_SHIFT (12)
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+/*
+ * PAGE_OFFSET -- the first address of the first page of memory.
+ * When not using MMU this corresponds to the first free page in
+ * physical memory (aligned on a page boundary).
+ */
+#ifdef CONFIG_64BIT
+#ifdef CONFIG_MMU
+#define PAGE_OFFSET kernel_map.page_offset
+#else
+#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#endif
+/*
+ * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
+ * define the PAGE_OFFSET value for SV48 and SV39.
+ */
+#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
+#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)
+#else
+#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#endif /* CONFIG_64BIT */
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_RISCV_ISA_ZICBOZ
+void clear_page(void *page);
+#else
+#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
+#endif
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(pgaddr, vaddr, page) clear_page(pgaddr)
+#define copy_user_page(vto, vfrom, vaddr, topg) \
+ memcpy((vto), (vfrom), PAGE_SIZE)
+
+/*
+ * Use struct definitions to apply C type checking
+ */
+
+/* Page Global Directory entry */
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+
+/* Page Table entry */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#ifdef CONFIG_64BIT
+#define PTE_FMT "%016lx"
+#else
+#define PTE_FMT "%08lx"
+#endif
+
+#ifdef CONFIG_64BIT
+/*
+ * We override this value as its generic definition uses __pa too early in
+ * the boot process (before kernel_map.va_pa_offset is set).
+ */
+#define MIN_MEMBLOCK_ADDR 0
+#endif
+
+#ifdef CONFIG_MMU
+#define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base))
+#else
+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+#endif /* CONFIG_MMU */
+
+struct kernel_mapping {
+ unsigned long page_offset;
+ unsigned long virt_addr;
+ unsigned long virt_offset;
+ uintptr_t phys_addr;
+ uintptr_t size;
+ /* Offset between linear mapping virtual address and kernel load address */
+ unsigned long va_pa_offset;
+ /* Offset between kernel mapping virtual address and kernel load address */
+ unsigned long va_kernel_pa_offset;
+ unsigned long va_kernel_xip_pa_offset;
+#ifdef CONFIG_XIP_KERNEL
+ uintptr_t xiprom;
+ uintptr_t xiprom_sz;
+#endif
+};
+
+extern struct kernel_mapping kernel_map;
+extern phys_addr_t phys_ram_base;
+
+#define is_kernel_mapping(x) \
+ ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
+
+#define is_linear_mapping(x) \
+ ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))
+
+#ifndef CONFIG_DEBUG_VIRTUAL
+#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
+#else
+void *linear_mapping_pa_to_va(unsigned long x);
+#endif
+#define kernel_mapping_pa_to_va(y) ({ \
+ unsigned long _y = (unsigned long)(y); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
+ (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \
+ (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
+ })
+#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
+
+#ifndef CONFIG_DEBUG_VIRTUAL
+#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
+#else
+phys_addr_t linear_mapping_va_to_pa(unsigned long x);
+#endif
+#define kernel_mapping_va_to_pa(y) ({ \
+ unsigned long _y = (unsigned long)(y); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
+ (_y - kernel_map.va_kernel_xip_pa_offset) : \
+ (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
+ })
+
+#define __va_to_pa_nodebug(x) ({ \
+ unsigned long _x = x; \
+ is_linear_mapping(_x) ? \
+ linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
+ })
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __virt_to_phys(unsigned long x);
+extern phys_addr_t __phys_addr_symbol(unsigned long x);
+#else
+#define __virt_to_phys(x) __va_to_pa_nodebug(x)
+#define __phys_addr_symbol(x) __va_to_pa_nodebug(x)
+#endif /* CONFIG_DEBUG_VIRTUAL */
+
+#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
+#define __pa(x) __virt_to_phys((unsigned long)(x))
+#define __va(x) ((void *)__pa_to_va_nodebug((phys_addr_t)(x)))
+
+#define phys_to_pfn(phys) (PFN_DOWN(phys))
+#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
+
+#define virt_to_pfn(vaddr) (phys_to_pfn(__pa(vaddr)))
+#define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
+
+#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
+#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
+
+#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
+
+#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
+
+unsigned long kaslr_offset(void);
+
+#endif /* __ASSEMBLY__ */
+
+#define virt_addr_valid(vaddr) ({ \
+ unsigned long _addr = (unsigned long)vaddr; \
+ (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* _ASM_RISCV_PAGE_H */
diff --git a/riscv/include/asm/patch.h b/riscv/include/asm/patch.h
new file mode 100644
index 0000000..e88b52d
--- /dev/null
+++ b/riscv/include/asm/patch.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef _ASM_RISCV_PATCH_H
+#define _ASM_RISCV_PATCH_H
+
+int patch_text_nosync(void *addr, const void *insns, size_t len);
+int patch_text_set_nosync(void *addr, u8 c, size_t len);
+int patch_text(void *addr, u32 *insns, int ninsns);
+
+extern int riscv_patch_in_stop_machine;
+
+#endif /* _ASM_RISCV_PATCH_H */
diff --git a/riscv/include/asm/pci.h b/riscv/include/asm/pci.h
new file mode 100644
index 0000000..cc2a184
--- /dev/null
+++ b/riscv/include/asm/pci.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 SiFive
+ */
+
+#ifndef _ASM_RISCV_PCI_H
+#define _ASM_RISCV_PCI_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+
+#define PCIBIOS_MIN_IO 4
+#define PCIBIOS_MIN_MEM 16
+
+#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
+static inline int pcibus_to_node(struct pci_bus *bus)
+{
+ return dev_to_node(&bus->dev);
+}
+#ifndef cpumask_of_pcibus
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+#endif
+#endif /* defined(CONFIG_PCI) && defined(CONFIG_NUMA) */
+
+/* Generic PCI */
+#include <asm-generic/pci.h>
+
+#endif /* _ASM_RISCV_PCI_H */
diff --git a/riscv/include/asm/perf_event.h b/riscv/include/asm/perf_event.h
new file mode 100644
index 0000000..665bbc9
--- /dev/null
+++ b/riscv/include/asm/perf_event.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 SiFive
+ * Copyright (C) 2018 Andes Technology Corporation
+ *
+ */
+
+#ifndef _ASM_RISCV_PERF_EVENT_H
+#define _ASM_RISCV_PERF_EVENT_H
+
+#include <linux/perf_event.h>
+#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
+
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->epc = (__ip); \
+ (regs)->s0 = (unsigned long) __builtin_frame_address(0); \
+ (regs)->sp = current_stack_pointer; \
+ (regs)->status = SR_PP; \
+}
+#endif /* _ASM_RISCV_PERF_EVENT_H */
diff --git a/riscv/include/asm/pgalloc.h b/riscv/include/asm/pgalloc.h
new file mode 100644
index 0000000..d169a4f
--- /dev/null
+++ b/riscv/include/asm/pgalloc.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGALLOC_H
+#define _ASM_RISCV_PGALLOC_H
+
+#include <linux/mm.h>
+#include <asm/tlb.h>
+
+#ifdef CONFIG_MMU
+#define __HAVE_ARCH_PUD_ALLOC_ONE
+#define __HAVE_ARCH_PUD_FREE
+#include <asm-generic/pgalloc.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+{
+ unsigned long pfn = virt_to_pfn(pte);
+
+ set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+
+static inline void pmd_populate(struct mm_struct *mm,
+ pmd_t *pmd, pgtable_t pte)
+{
+ unsigned long pfn = virt_to_pfn(page_address(pte));
+
+ set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ unsigned long pfn = virt_to_pfn(pmd);
+
+ set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+ if (pgtable_l4_enabled) {
+ unsigned long pfn = virt_to_pfn(pud);
+
+ set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+ }
+}
+
+static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
+ pud_t *pud)
+{
+ if (pgtable_l4_enabled) {
+ unsigned long pfn = virt_to_pfn(pud);
+
+ set_p4d_safe(p4d,
+ __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+ }
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
+{
+ if (pgtable_l5_enabled) {
+ unsigned long pfn = virt_to_pfn(p4d);
+
+ set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+ }
+}
+
+static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
+ p4d_t *p4d)
+{
+ if (pgtable_l5_enabled) {
+ unsigned long pfn = virt_to_pfn(p4d);
+
+ set_pgd_safe(pgd,
+ __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+ }
+}
+
+#define pud_alloc_one pud_alloc_one
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ if (pgtable_l4_enabled)
+ return __pud_alloc_one(mm, addr);
+
+ return NULL;
+}
+
+#define pud_free pud_free
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ if (pgtable_l4_enabled)
+ __pud_free(mm, pud);
+}
+
+#define __pud_free_tlb(tlb, pud, addr) pud_free((tlb)->mm, pud)
+
+#define p4d_alloc_one p4d_alloc_one
+static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ if (pgtable_l5_enabled) {
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ return (p4d_t *)get_zeroed_page(gfp);
+ }
+
+ return NULL;
+}
+
+static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
+ free_page((unsigned long)p4d);
+}
+
+#define p4d_free p4d_free
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ if (pgtable_l5_enabled)
+ __p4d_free(mm, p4d);
+}
+
+#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
+#endif /* __PAGETABLE_PMD_FOLDED */
+
+static inline void sync_kernel_mappings(pgd_t *pgd)
+{
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ init_mm.pgd + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *pgd;
+
+ pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+ if (likely(pgd != NULL)) {
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ /* Copy kernel mappings */
+ sync_kernel_mappings(pgd);
+ }
+ return pgd;
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+
+#endif /* __PAGETABLE_PMD_FOLDED */
+
+#define __pte_free_tlb(tlb, pte, buf) \
+do { \
+ pagetable_pte_dtor(page_ptdesc(pte)); \
+ tlb_remove_page_ptdesc((tlb), page_ptdesc(pte));\
+} while (0)
+#endif /* CONFIG_MMU */
+
+#endif /* _ASM_RISCV_PGALLOC_H */
diff --git a/riscv/include/asm/pgtable-32.h b/riscv/include/asm/pgtable-32.h
new file mode 100644
index 0000000..00f3369
--- /dev/null
+++ b/riscv/include/asm/pgtable-32.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_32_H
+#define _ASM_RISCV_PGTABLE_32_H
+
+#include <asm-generic/pgtable-nopmd.h>
+#include <linux/bits.h>
+#include <linux/const.h>
+
+/* Size of region mapped by a page global directory */
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 34
+
+/*
+ * rv32 PTE format:
+ * | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * PFN reserved for SW D A G U X W R V
+ */
+#define _PAGE_PFN_MASK GENMASK(31, 10)
+
+#define _PAGE_NOCACHE 0
+#define _PAGE_IO 0
+#define _PAGE_MTMASK 0
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_GLOBAL))
+
+static const __maybe_unused int pgtable_l4_enabled;
+static const __maybe_unused int pgtable_l5_enabled;
+
+#endif /* _ASM_RISCV_PGTABLE_32_H */
diff --git a/riscv/include/asm/pgtable-64.h b/riscv/include/asm/pgtable-64.h
new file mode 100644
index 0000000..9a2c780
--- /dev/null
+++ b/riscv/include/asm/pgtable-64.h
@@ -0,0 +1,415 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_64_H
+#define _ASM_RISCV_PGTABLE_64_H
+
+#include <linux/bits.h>
+#include <linux/const.h>
+#include <asm/errata_list.h>
+
+extern bool pgtable_l4_enabled;
+extern bool pgtable_l5_enabled;
+
+#define PGDIR_SHIFT_L3 30
+#define PGDIR_SHIFT_L4 39
+#define PGDIR_SHIFT_L5 48
+#define PGDIR_SIZE_L3 (_AC(1, UL) << PGDIR_SHIFT_L3)
+
+#define PGDIR_SHIFT (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \
+ (pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3))
+/* Size of region mapped by a page global directory */
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+/* p4d is folded into pgd in case of 4-level page table */
+#define P4D_SHIFT_L3 30
+#define P4D_SHIFT_L4 39
+#define P4D_SHIFT_L5 39
+#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \
+ (pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3))
+#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
+#define P4D_MASK (~(P4D_SIZE - 1))
+
+/* pud is folded into pgd in case of 3-level page table */
+#define PUD_SHIFT 30
+#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE - 1))
+
+#define PMD_SHIFT 21
+/* Size of region mapped by a page middle directory */
+#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+
+/* Page 4th Directory entry */
+typedef struct {
+ unsigned long p4d;
+} p4d_t;
+
+#define p4d_val(x) ((x).p4d)
+#define __p4d(x) ((p4d_t) { (x) })
+#define PTRS_PER_P4D (PAGE_SIZE / sizeof(p4d_t))
+
+/* Page Upper Directory entry */
+typedef struct {
+ unsigned long pud;
+} pud_t;
+
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+#define PTRS_PER_PUD (PAGE_SIZE / sizeof(pud_t))
+
+/* Page Middle Directory entry */
+typedef struct {
+ unsigned long pmd;
+} pmd_t;
+
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) })
+
+#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
+
+/*
+ * rv64 PTE format:
+ * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * N MT RSV PFN reserved for SW D A G U X W R V
+ */
+#define _PAGE_PFN_MASK GENMASK(53, 10)
+
+/*
+ * [63] Svnapot definitions:
+ * 0 Svnapot disabled
+ * 1 Svnapot enabled
+ */
+#define _PAGE_NAPOT_SHIFT 63
+#define _PAGE_NAPOT BIT(_PAGE_NAPOT_SHIFT)
+/*
+ * Only 64KB (order 4) napot ptes supported.
+ */
+#define NAPOT_CONT_ORDER_BASE 4
+enum napot_cont_order {
+ NAPOT_CONT64KB_ORDER = NAPOT_CONT_ORDER_BASE,
+ NAPOT_ORDER_MAX,
+};
+
+#define for_each_napot_order(order) \
+ for (order = NAPOT_CONT_ORDER_BASE; order < NAPOT_ORDER_MAX; order++)
+#define for_each_napot_order_rev(order) \
+ for (order = NAPOT_ORDER_MAX - 1; \
+ order >= NAPOT_CONT_ORDER_BASE; order--)
+#define napot_cont_order(val) (__builtin_ctzl((val.pte >> _PAGE_PFN_SHIFT) << 1))
+
+#define napot_cont_shift(order) ((order) + PAGE_SHIFT)
+#define napot_cont_size(order) BIT(napot_cont_shift(order))
+#define napot_cont_mask(order) (~(napot_cont_size(order) - 1UL))
+#define napot_pte_num(order) BIT(order)
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#define HUGE_MAX_HSTATE (2 + (NAPOT_ORDER_MAX - NAPOT_CONT_ORDER_BASE))
+#else
+#define HUGE_MAX_HSTATE 2
+#endif
+
+/*
+ * [62:61] Svpbmt Memory Type definitions:
+ *
+ * 00 - PMA Normal Cacheable, No change to implied PMA memory type
+ * 01 - NC Non-cacheable, idempotent, weakly-ordered Main Memory
+ * 10 - IO Non-cacheable, non-idempotent, strongly-ordered I/O memory
+ * 11 - Rsvd Reserved for future standard use
+ */
+#define _PAGE_NOCACHE_SVPBMT (1UL << 61)
+#define _PAGE_IO_SVPBMT (1UL << 62)
+#define _PAGE_MTMASK_SVPBMT (_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT)
+
+/*
+ * [63:59] T-Head Memory Type definitions:
+ * bit[63] SO - Strong Order
+ * bit[62] C - Cacheable
+ * bit[61] B - Bufferable
+ * bit[60] SH - Shareable
+ * bit[59] Sec - Trustable
+ * 00110 - NC Weakly-ordered, Non-cacheable, Bufferable, Shareable, Non-trustable
+ * 01110 - PMA Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
+ * 10010 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable
+ */
+#define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
+#define _PAGE_NOCACHE_THEAD ((1UL < 61) | (1UL << 60))
+#define _PAGE_IO_THEAD ((1UL << 63) | (1UL << 60))
+#define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
+
+static inline u64 riscv_page_mtmask(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_MTMASK);
+ return val;
+}
+
+static inline u64 riscv_page_nocache(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_NOCACHE);
+ return val;
+}
+
+static inline u64 riscv_page_io(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_IO);
+ return val;
+}
+
+#define _PAGE_NOCACHE riscv_page_nocache()
+#define _PAGE_IO riscv_page_io()
+#define _PAGE_MTMASK riscv_page_mtmask()
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_GLOBAL | \
+ _PAGE_MTMASK))
+
+static inline int pud_present(pud_t pud)
+{
+ return (pud_val(pud) & _PAGE_PRESENT);
+}
+
+static inline int pud_none(pud_t pud)
+{
+ return (pud_val(pud) == 0);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return !pud_present(pud);
+}
+
+#define pud_leaf pud_leaf
+static inline int pud_leaf(pud_t pud)
+{
+ return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
+}
+
+static inline int pud_user(pud_t pud)
+{
+ return pud_val(pud) & _PAGE_USER;
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+ *pudp = pud;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ set_pud(pudp, __pud(0));
+}
+
+static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
+{
+ return __pud((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline unsigned long _pud_pfn(pud_t pud)
+{
+ return __page_val_to_pfn(pud_val(pud));
+}
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud)));
+}
+
+static inline struct page *pud_page(pud_t pud)
+{
+ return pfn_to_page(__page_val_to_pfn(pud_val(pud)));
+}
+
+#define mm_p4d_folded mm_p4d_folded
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+ if (pgtable_l5_enabled)
+ return false;
+
+ return true;
+}
+
+#define mm_pud_folded mm_pud_folded
+static inline bool mm_pud_folded(struct mm_struct *mm)
+{
+ if (pgtable_l4_enabled)
+ return false;
+
+ return true;
+}
+
+#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+
+static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
+{
+ unsigned long prot_val = pgprot_val(prot);
+
+ ALT_THEAD_PMA(prot_val);
+
+ return __pmd((pfn << _PAGE_PFN_SHIFT) | prot_val);
+}
+
+static inline unsigned long _pmd_pfn(pmd_t pmd)
+{
+ return __page_val_to_pfn(pmd_val(pmd));
+}
+
+#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
+
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+
+#define p4d_ERROR(e) \
+ pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
+
+static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ *p4dp = p4d;
+ else
+ set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
+}
+
+static inline int p4d_none(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return (p4d_val(p4d) == 0);
+
+ return 0;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return (p4d_val(p4d) & _PAGE_PRESENT);
+
+ return 1;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return !p4d_present(p4d);
+
+ return 0;
+}
+
+static inline void p4d_clear(p4d_t *p4d)
+{
+ if (pgtable_l4_enabled)
+ set_p4d(p4d, __p4d(0));
+}
+
+static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
+{
+ return __p4d((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline unsigned long _p4d_pfn(p4d_t p4d)
+{
+ return __page_val_to_pfn(p4d_val(p4d));
+}
+
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return (pud_t *)pfn_to_virt(__page_val_to_pfn(p4d_val(p4d)));
+
+ return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
+}
+#define p4d_page_vaddr(p4d) ((unsigned long)p4d_pgtable(p4d))
+
+static inline struct page *p4d_page(p4d_t p4d)
+{
+ return pfn_to_page(__page_val_to_pfn(p4d_val(p4d)));
+}
+
+#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+#define pud_offset pud_offset
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ if (pgtable_l4_enabled)
+ return p4d_pgtable(*p4d) + pud_index(address);
+
+ return (pud_t *)p4d;
+}
+
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+ if (pgtable_l5_enabled)
+ *pgdp = pgd;
+ else
+ set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+ if (pgtable_l5_enabled)
+ return (pgd_val(pgd) == 0);
+
+ return 0;
+}
+
+static inline int pgd_present(pgd_t pgd)
+{
+ if (pgtable_l5_enabled)
+ return (pgd_val(pgd) & _PAGE_PRESENT);
+
+ return 1;
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+ if (pgtable_l5_enabled)
+ return !pgd_present(pgd);
+
+ return 0;
+}
+
+static inline void pgd_clear(pgd_t *pgd)
+{
+ if (pgtable_l5_enabled)
+ set_pgd(pgd, __pgd(0));
+}
+
+static inline p4d_t *pgd_pgtable(pgd_t pgd)
+{
+ if (pgtable_l5_enabled)
+ return (p4d_t *)pfn_to_virt(__page_val_to_pfn(pgd_val(pgd)));
+
+ return (p4d_t *)p4d_pgtable((p4d_t) { pgd_val(pgd) });
+}
+#define pgd_page_vaddr(pgd) ((unsigned long)pgd_pgtable(pgd))
+
+static inline struct page *pgd_page(pgd_t pgd)
+{
+ return pfn_to_page(__page_val_to_pfn(pgd_val(pgd)));
+}
+#define pgd_page(pgd) pgd_page(pgd)
+
+#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
+
+#define p4d_offset p4d_offset
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+ if (pgtable_l5_enabled)
+ return pgd_pgtable(*pgd) + p4d_index(address);
+
+ return (p4d_t *)pgd;
+}
+
+#endif /* _ASM_RISCV_PGTABLE_64_H */
diff --git a/riscv/include/asm/pgtable-bits.h b/riscv/include/asm/pgtable-bits.h
new file mode 100644
index 0000000..179bd4a
--- /dev/null
+++ b/riscv/include/asm/pgtable-bits.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_BITS_H
+#define _ASM_RISCV_PGTABLE_BITS_H
+
+#define _PAGE_ACCESSED_OFFSET 6
+
+#define _PAGE_PRESENT (1 << 0)
+#define _PAGE_READ (1 << 1) /* Readable */
+#define _PAGE_WRITE (1 << 2) /* Writable */
+#define _PAGE_EXEC (1 << 3) /* Executable */
+#define _PAGE_USER (1 << 4) /* User */
+#define _PAGE_GLOBAL (1 << 5) /* Global */
+#define _PAGE_ACCESSED (1 << 6) /* Set by hardware on any access */
+#define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */
+#define _PAGE_SOFT (3 << 8) /* Reserved for software */
+
+#define _PAGE_SPECIAL (1 << 8) /* RSW: 0x1 */
+#define _PAGE_TABLE _PAGE_PRESENT
+
+/*
+ * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
+ * distinguish them from swapped out pages
+ */
+#define _PAGE_PROT_NONE _PAGE_GLOBAL
+
+/* Used for swap PTEs only. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_ACCESSED
+
+#define _PAGE_PFN_SHIFT 10
+
+/*
+ * when all of R/W/X are zero, the PTE is a pointer to the next level
+ * of the page table; otherwise, it is a leaf PTE.
+ */
+#define _PAGE_LEAF (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
+
+#endif /* _ASM_RISCV_PGTABLE_BITS_H */
diff --git a/riscv/include/asm/pgtable.h b/riscv/include/asm/pgtable.h
new file mode 100644
index 0000000..0a073d4
--- /dev/null
+++ b/riscv/include/asm/pgtable.h
@@ -0,0 +1,931 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_H
+#define _ASM_RISCV_PGTABLE_H
+
+//#include <linux/mmzone.h>
+//#include <linux/sizes.h>
+
+#include <asm/pgtable-bits.h>
+
+#ifndef CONFIG_MMU
+#define KERNEL_LINK_ADDR PAGE_OFFSET
+#define KERN_VIRT_SIZE (UL(-1))
+#else
+
+#define ADDRESS_SPACE_END (UL(-1))
+
+#ifdef CONFIG_64BIT
+/* Leave 2GB for kernel and BPF at the end of the address space */
+#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1)
+#else
+#define KERNEL_LINK_ADDR PAGE_OFFSET
+#endif
+
+/* Number of entries in the page global directory */
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
+/* Number of entries in the page table */
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
+
+/*
+ * Half of the kernel address space (1/4 of the entries of the page global
+ * directory) is for the direct mapping.
+ */
+#define KERN_VIRT_SIZE ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
+
+#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END PAGE_OFFSET
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+
+#define BPF_JIT_REGION_SIZE (SZ_128M)
+#ifdef CONFIG_64BIT
+#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (MODULES_END)
+#else
+#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (VMALLOC_END)
+#endif
+
+/* Modules always live before the kernel */
+#ifdef CONFIG_64BIT
+/* This is used to define the end of the KASAN shadow region */
+#define MODULES_LOWEST_VADDR (KERNEL_LINK_ADDR - SZ_2G)
+#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
+#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
+#endif
+
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VA_BITS_SV32 32
+#ifdef CONFIG_64BIT
+#define VA_BITS_SV39 39
+#define VA_BITS_SV48 48
+#define VA_BITS_SV57 57
+
+#define VA_BITS (pgtable_l5_enabled ? \
+ VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
+#else
+#define VA_BITS VA_BITS_SV32
+#endif
+
+#define VMEMMAP_SHIFT \
+ (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END VMALLOC_START
+#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
+
+/*
+ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+ * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+ */
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+#define PCI_IO_SIZE SZ_16M
+#define PCI_IO_END VMEMMAP_START
+#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
+
+#define FIXADDR_TOP PCI_IO_START
+#ifdef CONFIG_64BIT
+#define MAX_FDT_SIZE PMD_SIZE
+#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
+#define FIXADDR_SIZE (PMD_SIZE + FIX_FDT_SIZE)
+#else
+#define MAX_FDT_SIZE PGDIR_SIZE
+#define FIX_FDT_SIZE MAX_FDT_SIZE
+#define FIXADDR_SIZE (PGDIR_SIZE + FIX_FDT_SIZE)
+#endif
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#endif
+
+#ifdef CONFIG_XIP_KERNEL
+#define XIP_OFFSET SZ_32M
+#define XIP_OFFSET_MASK (SZ_32M - 1)
+#else
+#define XIP_OFFSET 0
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include <linux/mm_types.h>
+#include <asm/compat.h>
+
+#define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
+
+#ifdef CONFIG_64BIT
+#include <asm/pgtable-64.h>
+
+#define VA_USER_SV39 (UL(1) << (VA_BITS_SV39 - 1))
+#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
+#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))
+
+#ifdef CONFIG_COMPAT
+#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
+#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
+#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
+#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
+#else
+#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
+#define MMAP_MIN_VA_BITS (VA_BITS_SV39)
+#endif /* CONFIG_COMPAT */
+
+#else
+#include <asm/pgtable-32.h>
+#endif /* CONFIG_64BIT */
+
+#include <linux/page_table_check.h>
+
+#ifdef CONFIG_XIP_KERNEL
+#define XIP_FIXUP(addr) ({ \
+ uintptr_t __a = (uintptr_t)(addr); \
+ (__a >= CONFIG_XIP_PHYS_ADDR && \
+ __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
+ __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
+ __a; \
+ })
+#else
+#define XIP_FIXUP(addr) (addr)
+#endif /* CONFIG_XIP_KERNEL */
+
+struct pt_alloc_ops {
+ pte_t *(*get_pte_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pte)(uintptr_t va);
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_t *(*get_pmd_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pmd)(uintptr_t va);
+ pud_t *(*get_pud_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pud)(uintptr_t va);
+ p4d_t *(*get_p4d_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_p4d)(uintptr_t va);
+#endif
+};
+
+extern struct pt_alloc_ops pt_ops __initdata;
+
+#ifdef CONFIG_MMU
+/* Number of PGD entries that a user-mode program can use */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/* Page protection bits */
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
+
+#define PAGE_NONE __pgprot(_PAGE_PROT_NONE | _PAGE_READ)
+#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
+#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
+ _PAGE_EXEC | _PAGE_WRITE)
+
+#define PAGE_COPY PAGE_READ
+#define PAGE_COPY_EXEC PAGE_READ_EXEC
+#define PAGE_SHARED PAGE_WRITE
+#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC
+
+#define _PAGE_KERNEL (_PAGE_READ \
+ | _PAGE_WRITE \
+ | _PAGE_PRESENT \
+ | _PAGE_ACCESSED \
+ | _PAGE_DIRTY \
+ | _PAGE_GLOBAL)
+
+#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
+#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
+ | _PAGE_EXEC)
+
+#define PAGE_TABLE __pgprot(_PAGE_TABLE)
+
+#define _PAGE_IOREMAP ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
+#define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP)
+
+extern pgd_t swapper_pg_dir[];
+extern pgd_t trampoline_pg_dir[];
+extern pgd_t early_pg_dir[];
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_present(pmd_t pmd)
+{
+ /*
+ * Checking for _PAGE_LEAF is needed too because:
+ * When splitting a THP, split_huge_page() will temporarily clear
+ * the present bit, in this situation, pmd_present() and
+ * pmd_trans_huge() still needs to return true.
+ */
+ return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
+}
+#else
+static inline int pmd_present(pmd_t pmd)
+{
+ return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+}
+#endif
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return (pmd_val(pmd) == 0);
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
+}
+
+#define pmd_leaf pmd_leaf
+static inline int pmd_leaf(pmd_t pmd)
+{
+ return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ set_pmd(pmdp, __pmd(0));
+}
+
+static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
+{
+ unsigned long prot_val = pgprot_val(prot);
+
+ ALT_THEAD_PMA(prot_val);
+
+ return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
+}
+
+static inline unsigned long _pgd_pfn(pgd_t pgd)
+{
+ return __page_val_to_pfn(pgd_val(pgd));
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+ return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
+}
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
+static inline pte_t pud_pte(pud_t pud)
+{
+ return __pte(pud_val(pud));
+}
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#include <asm/cpufeature.h>
+
+static __always_inline bool has_svnapot(void)
+{
+ return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
+}
+
+static inline unsigned long pte_napot(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_NAPOT;
+}
+
+static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
+{
+ int pos = order - 1 + _PAGE_PFN_SHIFT;
+ unsigned long napot_bit = BIT(pos);
+ unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);
+
+ return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
+}
+
+#else
+
+static __always_inline bool has_svnapot(void) { return false; }
+
+static inline unsigned long pte_napot(pte_t pte)
+{
+ return 0;
+}
+
+#endif /* CONFIG_RISCV_ISA_SVNAPOT */
+
+/* Yields the page frame number (PFN) of a page table entry */
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ unsigned long res = __page_val_to_pfn(pte_val(pte));
+
+ if (has_svnapot() && pte_napot(pte))
+ res = res & (res - 1UL);
+
+ return res;
+}
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+/* Constructs a page table entry */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+ unsigned long prot_val = pgprot_val(prot);
+
+ ALT_THEAD_PMA(prot_val);
+
+ return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
+}
+
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+static inline int pte_present(pte_t pte)
+{
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+}
+
+static inline int pte_none(pte_t pte)
+{
+ return (pte_val(pte) == 0);
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_WRITE;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_EXEC;
+}
+
+static inline int pte_user(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_USER;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+ return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SPECIAL;
+}
+
+/* static inline pte_t pte_rdprotect(pte_t pte) */
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_WRITE));
+}
+
+/* static inline pte_t pte_mkread(pte_t pte) */
+
+static inline pte_t pte_mkwrite_novma(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_WRITE);
+}
+
+/* static inline pte_t pte_mkexec(pte_t pte) */
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return pte;
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * See the comment in include/asm-generic/pgtable.h
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return pte_protnone(pmd_pte(pmd));
+}
+#endif
+
+/* Modify page protection bits */
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ unsigned long newprot_val = pgprot_val(newprot);
+
+ ALT_THEAD_PMA(newprot_val);
+
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
+}
+
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
+
+
+/* Commit new configuration to MMU hardware */
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, unsigned int nr)
+{
+ /*
+ * The kernel assumes that TLBs don't cache invalid entries, but
+ * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
+ * cache flush; it is necessary even after writing invalid entries.
+ * Relying on flush_tlb_fix_spurious_fault would suffice, but
+ * the extra traps reduce performance. So, eagerly SFENCE.VMA.
+ */
+ while (nr--)
+ local_flush_tlb_page(address + nr * PAGE_SIZE);
+}
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb update_mmu_cache
+
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ pte_t *ptep = (pte_t *)pmdp;
+
+ update_mmu_cache(vma, address, ptep);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+
+/*
+ * Certain architectures need to do special things when PTEs within
+ * a page table are directly modified. Thus, the following hook is
+ * made available.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+}
+
+void flush_icache_pte(pte_t pte);
+
+static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
+{
+ if (pte_present(pteval) && pte_exec(pteval))
+ flush_icache_pte(pteval);
+
+ set_pte(ptep, pteval);
+}
+
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval, unsigned int nr)
+{
+ page_table_check_ptes_set(mm, ptep, pteval, nr);
+
+ for (;;) {
+ __set_pte_at(ptep, pteval);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
+ }
+}
+#define set_ptes set_ptes
+
+static inline void pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ __set_pte_at(ptep, __pte(0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ if (!pte_same(*ptep, entry))
+ __set_pte_at(ptep, entry);
+ /*
+ * update_mmu_cache will unconditionally execute, handling both
+ * the case that the PTE changed and the spurious fault case.
+ */
+ return true;
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
+
+ page_table_check_pte_clear(mm, pte);
+
+ return pte;
+}
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ if (!pte_young(*ptep))
+ return 0;
+ return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ /*
+ * This comment is borrowed from x86, but applies equally to RISC-V:
+ *
+ * Clearing the accessed bit without a TLB flush
+ * doesn't cause data corruption. [ It could cause incorrect
+ * page aging and the (mistaken) reclaim of hot pages, but the
+ * chance of that should be relatively low. ]
+ *
+ * So as a performance optimization don't flush the TLB when
+ * clearing the accessed bit, it will eventually be flushed by
+ * a context switch or a VM operation anyway. [ In the rare
+ * event of it not getting flushed for a long time the delay
+ * shouldn't really matter because there's no real memory
+ * pressure for swapout to react to. ]
+ */
+ return ptep_test_and_clear_young(vma, address, ptep);
+}
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot &= ~_PAGE_MTMASK;
+ prot |= _PAGE_IO;
+
+ return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot &= ~_PAGE_MTMASK;
+ prot |= _PAGE_NOCACHE;
+
+ return __pgprot(prot);
+}
+
+/*
+ * THP functions
+ */
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd(pte_val(pte));
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
+}
+
+#define __pmd_to_phys(pmd) (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
+}
+
+#define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)
+
+static inline unsigned long pud_pfn(pud_t pud)
+{
+ return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
+}
+
+#define pmd_write pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ return pte_write(pmd_pte(pmd));
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return pte_dirty(pmd_pte(pmd));
+}
+
+#define pmd_young pmd_young
+static inline int pmd_young(pmd_t pmd)
+{
+ return pte_young(pmd_pte(pmd));
+}
+
+static inline int pmd_user(pmd_t pmd)
+{
+ return pte_user(pmd_pte(pmd));
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ return pte_pmd(pte_mkold(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
+{
+ return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+ return pte_pmd(pte_mkclean(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ page_table_check_pmd_set(mm, pmdp, pmd);
+ return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
+}
+
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud)
+{
+ page_table_check_pud_set(mm, pudp, pud);
+ return __set_pte_at((pte_t *)pudp, pud_pte(pud));
+}
+
+#ifdef CONFIG_PAGE_TABLE_CHECK
+static inline bool pte_user_accessible_page(pte_t pte)
+{
+ return pte_present(pte) && pte_user(pte);
+}
+
+static inline bool pmd_user_accessible_page(pmd_t pmd)
+{
+ return pmd_leaf(pmd) && pmd_user(pmd);
+}
+
+static inline bool pud_user_accessible_page(pud_t pud)
+{
+ return pud_leaf(pud) && pud_user(pud);
+}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return pmd_leaf(pmd);
+}
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
+}
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+}
+
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));
+
+ page_table_check_pmd_clear(mm, pmd);
+
+ return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
+}
+
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
+ return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
+}
+
+#define pmdp_collapse_flush pmdp_collapse_flush
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTE:
+ * bit 0: _PAGE_PRESENT (zero)
+ * bit 1 to 3: _PAGE_LEAF (zero)
+ * bit 5: _PAGE_PROT_NONE (zero)
+ * bit 6: exclusive marker
+ * bits 7 to 11: swap type
+ * bits 12 to XLEN-1: swap offset
+ */
+#define __SWP_TYPE_SHIFT 7
+#define __SWP_TYPE_BITS 5
+#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+#define MAX_SWAPFILES_CHECK() \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) \
+ { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
+ ((offset) << __SWP_OFFSET_SHIFT) })
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
+}
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
+#define __swp_entry_to_pmd(swp) __pmd((swp).val)
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+/*
+ * In the RV64 Linux scheme, we give the user half of the virtual-address space
+ * and give the kernel the other (upper) half.
+ */
+#ifdef CONFIG_64BIT
+#define KERN_VIRT_START (-(BIT(VA_BITS)) + TASK_SIZE)
+#else
+#define KERN_VIRT_START FIXADDR_START
+#endif
+
+/*
+ * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
+ * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
+ * Task size is:
+ * - 0x9fc00000 (~2.5GB) for RV32.
+ * - 0x4000000000 ( 256GB) for RV64 using SV39 mmu
+ * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
+ * - 0x100000000000000 ( 64PB) for RV64 using SV57 mmu
+ *
+ * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
+ * Instruction Set Manual Volume II: Privileged Architecture" states that
+ * "load and store effective addresses, which are 64bits, must have bits
+ * 63–48 all equal to bit 47, or else a page-fault exception will occur."
+ * Similarly for SV57, bits 63–57 must be equal to bit 56.
+ */
+#ifdef CONFIG_64BIT
+#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
+#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
+
+#ifdef CONFIG_COMPAT
+#define TASK_SIZE_32 (_AC(0x80000000, UL))
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#else
+#define TASK_SIZE TASK_SIZE_64
+#endif
+
+#else
+#define TASK_SIZE FIXADDR_START
+#define TASK_SIZE_MIN TASK_SIZE
+#endif
+
+#else /* CONFIG_MMU */
+
+#define PAGE_SHARED __pgprot(0)
+#define PAGE_KERNEL __pgprot(0)
+#define swapper_pg_dir NULL
+#define TASK_SIZE 0xffffffffUL
+#define VMALLOC_START _AC(0, UL)
+#define VMALLOC_END TASK_SIZE
+
+#endif /* !CONFIG_MMU */
+
+extern char _start[];
+extern void *_dtb_early_va;
+extern uintptr_t _dtb_early_pa;
+#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
+#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va))
+#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
+#else
+#define dtb_early_va _dtb_early_va
+#define dtb_early_pa _dtb_early_pa
+#endif /* CONFIG_XIP_KERNEL */
+extern u64 satp_mode;
+
+void paging_init(void);
+void misc_mem_init(void);
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero,
+ * used for zero-mapped memory areas, etc.
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PGTABLE_H */
diff --git a/riscv/include/asm/pgtable.h~ b/riscv/include/asm/pgtable.h~
new file mode 100644
index 0000000..74ffb21
--- /dev/null
+++ b/riscv/include/asm/pgtable.h~
@@ -0,0 +1,931 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_H
+#define _ASM_RISCV_PGTABLE_H
+
+#include <linux/mmzone.h>
+#include <linux/sizes.h>
+
+#include <asm/pgtable-bits.h>
+
+#ifndef CONFIG_MMU
+#define KERNEL_LINK_ADDR PAGE_OFFSET
+#define KERN_VIRT_SIZE (UL(-1))
+#else
+
+#define ADDRESS_SPACE_END (UL(-1))
+
+#ifdef CONFIG_64BIT
+/* Leave 2GB for kernel and BPF at the end of the address space */
+#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1)
+#else
+#define KERNEL_LINK_ADDR PAGE_OFFSET
+#endif
+
+/* Number of entries in the page global directory */
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
+/* Number of entries in the page table */
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
+
+/*
+ * Half of the kernel address space (1/4 of the entries of the page global
+ * directory) is for the direct mapping.
+ */
+#define KERN_VIRT_SIZE ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
+
+#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END PAGE_OFFSET
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+
+#define BPF_JIT_REGION_SIZE (SZ_128M)
+#ifdef CONFIG_64BIT
+#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (MODULES_END)
+#else
+#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (VMALLOC_END)
+#endif
+
+/* Modules always live before the kernel */
+#ifdef CONFIG_64BIT
+/* This is used to define the end of the KASAN shadow region */
+#define MODULES_LOWEST_VADDR (KERNEL_LINK_ADDR - SZ_2G)
+#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
+#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
+#endif
+
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VA_BITS_SV32 32
+#ifdef CONFIG_64BIT
+#define VA_BITS_SV39 39
+#define VA_BITS_SV48 48
+#define VA_BITS_SV57 57
+
+#define VA_BITS (pgtable_l5_enabled ? \
+ VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
+#else
+#define VA_BITS VA_BITS_SV32
+#endif
+
+#define VMEMMAP_SHIFT \
+ (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END VMALLOC_START
+#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
+
+/*
+ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+ * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+ */
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+#define PCI_IO_SIZE SZ_16M
+#define PCI_IO_END VMEMMAP_START
+#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
+
+#define FIXADDR_TOP PCI_IO_START
+#ifdef CONFIG_64BIT
+#define MAX_FDT_SIZE PMD_SIZE
+#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
+#define FIXADDR_SIZE (PMD_SIZE + FIX_FDT_SIZE)
+#else
+#define MAX_FDT_SIZE PGDIR_SIZE
+#define FIX_FDT_SIZE MAX_FDT_SIZE
+#define FIXADDR_SIZE (PGDIR_SIZE + FIX_FDT_SIZE)
+#endif
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#endif
+
+#ifdef CONFIG_XIP_KERNEL
+#define XIP_OFFSET SZ_32M
+#define XIP_OFFSET_MASK (SZ_32M - 1)
+#else
+#define XIP_OFFSET 0
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include <linux/mm_types.h>
+#include <asm/compat.h>
+
+#define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
+
+#ifdef CONFIG_64BIT
+#include <asm/pgtable-64.h>
+
+#define VA_USER_SV39 (UL(1) << (VA_BITS_SV39 - 1))
+#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
+#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))
+
+#ifdef CONFIG_COMPAT
+#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
+#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
+#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
+#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
+#else
+#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
+#define MMAP_MIN_VA_BITS (VA_BITS_SV39)
+#endif /* CONFIG_COMPAT */
+
+#else
+#include <asm/pgtable-32.h>
+#endif /* CONFIG_64BIT */
+
+#include <linux/page_table_check.h>
+
+#ifdef CONFIG_XIP_KERNEL
+#define XIP_FIXUP(addr) ({ \
+ uintptr_t __a = (uintptr_t)(addr); \
+ (__a >= CONFIG_XIP_PHYS_ADDR && \
+ __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
+ __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
+ __a; \
+ })
+#else
+#define XIP_FIXUP(addr) (addr)
+#endif /* CONFIG_XIP_KERNEL */
+
+struct pt_alloc_ops {
+ pte_t *(*get_pte_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pte)(uintptr_t va);
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_t *(*get_pmd_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pmd)(uintptr_t va);
+ pud_t *(*get_pud_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pud)(uintptr_t va);
+ p4d_t *(*get_p4d_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_p4d)(uintptr_t va);
+#endif
+};
+
+extern struct pt_alloc_ops pt_ops __initdata;
+
+#ifdef CONFIG_MMU
+/* Number of PGD entries that a user-mode program can use */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/* Page protection bits */
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
+
+#define PAGE_NONE __pgprot(_PAGE_PROT_NONE | _PAGE_READ)
+#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
+#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
+ _PAGE_EXEC | _PAGE_WRITE)
+
+#define PAGE_COPY PAGE_READ
+#define PAGE_COPY_EXEC PAGE_READ_EXEC
+#define PAGE_SHARED PAGE_WRITE
+#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC
+
+#define _PAGE_KERNEL (_PAGE_READ \
+ | _PAGE_WRITE \
+ | _PAGE_PRESENT \
+ | _PAGE_ACCESSED \
+ | _PAGE_DIRTY \
+ | _PAGE_GLOBAL)
+
+#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
+#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
+ | _PAGE_EXEC)
+
+#define PAGE_TABLE __pgprot(_PAGE_TABLE)
+
+#define _PAGE_IOREMAP ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
+#define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP)
+
+extern pgd_t swapper_pg_dir[];
+extern pgd_t trampoline_pg_dir[];
+extern pgd_t early_pg_dir[];
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_present(pmd_t pmd)
+{
+ /*
+ * Checking for _PAGE_LEAF is needed too because:
+ * When splitting a THP, split_huge_page() will temporarily clear
+ * the present bit, in this situation, pmd_present() and
+ * pmd_trans_huge() still needs to return true.
+ */
+ return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
+}
+#else
+static inline int pmd_present(pmd_t pmd)
+{
+ return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+}
+#endif
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return (pmd_val(pmd) == 0);
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
+}
+
+#define pmd_leaf pmd_leaf
+static inline int pmd_leaf(pmd_t pmd)
+{
+ return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ set_pmd(pmdp, __pmd(0));
+}
+
+static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
+{
+ unsigned long prot_val = pgprot_val(prot);
+
+ ALT_THEAD_PMA(prot_val);
+
+ return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
+}
+
+static inline unsigned long _pgd_pfn(pgd_t pgd)
+{
+ return __page_val_to_pfn(pgd_val(pgd));
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+ return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
+}
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
+static inline pte_t pud_pte(pud_t pud)
+{
+ return __pte(pud_val(pud));
+}
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#include <asm/cpufeature.h>
+
+static __always_inline bool has_svnapot(void)
+{
+ return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
+}
+
+static inline unsigned long pte_napot(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_NAPOT;
+}
+
+static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
+{
+ int pos = order - 1 + _PAGE_PFN_SHIFT;
+ unsigned long napot_bit = BIT(pos);
+ unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);
+
+ return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
+}
+
+#else
+
+static __always_inline bool has_svnapot(void) { return false; }
+
+static inline unsigned long pte_napot(pte_t pte)
+{
+ return 0;
+}
+
+#endif /* CONFIG_RISCV_ISA_SVNAPOT */
+
+/* Yields the page frame number (PFN) of a page table entry */
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ unsigned long res = __page_val_to_pfn(pte_val(pte));
+
+ if (has_svnapot() && pte_napot(pte))
+ res = res & (res - 1UL);
+
+ return res;
+}
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+/* Constructs a page table entry */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+ unsigned long prot_val = pgprot_val(prot);
+
+ ALT_THEAD_PMA(prot_val);
+
+ return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
+}
+
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+static inline int pte_present(pte_t pte)
+{
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+}
+
+static inline int pte_none(pte_t pte)
+{
+ return (pte_val(pte) == 0);
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_WRITE;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_EXEC;
+}
+
+static inline int pte_user(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_USER;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+ return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SPECIAL;
+}
+
+/* static inline pte_t pte_rdprotect(pte_t pte) */
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_WRITE));
+}
+
+/* static inline pte_t pte_mkread(pte_t pte) */
+
+static inline pte_t pte_mkwrite_novma(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_WRITE);
+}
+
+/* static inline pte_t pte_mkexec(pte_t pte) */
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return pte;
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * See the comment in include/asm-generic/pgtable.h
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return pte_protnone(pmd_pte(pmd));
+}
+#endif
+
+/* Modify page protection bits */
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ unsigned long newprot_val = pgprot_val(newprot);
+
+ ALT_THEAD_PMA(newprot_val);
+
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
+}
+
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
+
+
+/* Commit new configuration to MMU hardware */
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, unsigned int nr)
+{
+ /*
+ * The kernel assumes that TLBs don't cache invalid entries, but
+ * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
+ * cache flush; it is necessary even after writing invalid entries.
+ * Relying on flush_tlb_fix_spurious_fault would suffice, but
+ * the extra traps reduce performance. So, eagerly SFENCE.VMA.
+ */
+ while (nr--)
+ local_flush_tlb_page(address + nr * PAGE_SIZE);
+}
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb update_mmu_cache
+
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ pte_t *ptep = (pte_t *)pmdp;
+
+ update_mmu_cache(vma, address, ptep);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+
+/*
+ * Certain architectures need to do special things when PTEs within
+ * a page table are directly modified. Thus, the following hook is
+ * made available.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+}
+
+void flush_icache_pte(pte_t pte);
+
+static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
+{
+ if (pte_present(pteval) && pte_exec(pteval))
+ flush_icache_pte(pteval);
+
+ set_pte(ptep, pteval);
+}
+
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval, unsigned int nr)
+{
+ page_table_check_ptes_set(mm, ptep, pteval, nr);
+
+ for (;;) {
+ __set_pte_at(ptep, pteval);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
+ }
+}
+#define set_ptes set_ptes
+
+static inline void pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ __set_pte_at(ptep, __pte(0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ if (!pte_same(*ptep, entry))
+ __set_pte_at(ptep, entry);
+ /*
+ * update_mmu_cache will unconditionally execute, handling both
+ * the case that the PTE changed and the spurious fault case.
+ */
+ return true;
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
+
+ page_table_check_pte_clear(mm, pte);
+
+ return pte;
+}
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ if (!pte_young(*ptep))
+ return 0;
+ return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ /*
+ * This comment is borrowed from x86, but applies equally to RISC-V:
+ *
+ * Clearing the accessed bit without a TLB flush
+ * doesn't cause data corruption. [ It could cause incorrect
+ * page aging and the (mistaken) reclaim of hot pages, but the
+ * chance of that should be relatively low. ]
+ *
+ * So as a performance optimization don't flush the TLB when
+ * clearing the accessed bit, it will eventually be flushed by
+ * a context switch or a VM operation anyway. [ In the rare
+ * event of it not getting flushed for a long time the delay
+ * shouldn't really matter because there's no real memory
+ * pressure for swapout to react to. ]
+ */
+ return ptep_test_and_clear_young(vma, address, ptep);
+}
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot &= ~_PAGE_MTMASK;
+ prot |= _PAGE_IO;
+
+ return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot &= ~_PAGE_MTMASK;
+ prot |= _PAGE_NOCACHE;
+
+ return __pgprot(prot);
+}
+
+/*
+ * THP functions
+ */
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd(pte_val(pte));
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
+}
+
+#define __pmd_to_phys(pmd) (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
+}
+
+#define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)
+
+static inline unsigned long pud_pfn(pud_t pud)
+{
+ return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
+}
+
+#define pmd_write pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ return pte_write(pmd_pte(pmd));
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return pte_dirty(pmd_pte(pmd));
+}
+
+#define pmd_young pmd_young
+static inline int pmd_young(pmd_t pmd)
+{
+ return pte_young(pmd_pte(pmd));
+}
+
+static inline int pmd_user(pmd_t pmd)
+{
+ return pte_user(pmd_pte(pmd));
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ return pte_pmd(pte_mkold(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
+{
+ return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+ return pte_pmd(pte_mkclean(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ page_table_check_pmd_set(mm, pmdp, pmd);
+ return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
+}
+
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud)
+{
+ page_table_check_pud_set(mm, pudp, pud);
+ return __set_pte_at((pte_t *)pudp, pud_pte(pud));
+}
+
+#ifdef CONFIG_PAGE_TABLE_CHECK
+static inline bool pte_user_accessible_page(pte_t pte)
+{
+ return pte_present(pte) && pte_user(pte);
+}
+
+static inline bool pmd_user_accessible_page(pmd_t pmd)
+{
+ return pmd_leaf(pmd) && pmd_user(pmd);
+}
+
+static inline bool pud_user_accessible_page(pud_t pud)
+{
+ return pud_leaf(pud) && pud_user(pud);
+}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return pmd_leaf(pmd);
+}
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
+}
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+}
+
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));
+
+ page_table_check_pmd_clear(mm, pmd);
+
+ return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
+}
+
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
+ return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
+}
+
+#define pmdp_collapse_flush pmdp_collapse_flush
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTE:
+ * bit 0: _PAGE_PRESENT (zero)
+ * bit 1 to 3: _PAGE_LEAF (zero)
+ * bit 5: _PAGE_PROT_NONE (zero)
+ * bit 6: exclusive marker
+ * bits 7 to 11: swap type
+ * bits 12 to XLEN-1: swap offset
+ */
+#define __SWP_TYPE_SHIFT 7
+#define __SWP_TYPE_BITS 5
+#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+#define MAX_SWAPFILES_CHECK() \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) \
+ { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
+ ((offset) << __SWP_OFFSET_SHIFT) })
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
+}
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
+#define __swp_entry_to_pmd(swp) __pmd((swp).val)
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+/*
+ * In the RV64 Linux scheme, we give the user half of the virtual-address space
+ * and give the kernel the other (upper) half.
+ */
+#ifdef CONFIG_64BIT
+#define KERN_VIRT_START (-(BIT(VA_BITS)) + TASK_SIZE)
+#else
+#define KERN_VIRT_START FIXADDR_START
+#endif
+
+/*
+ * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
+ * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
+ * Task size is:
+ * - 0x9fc00000 (~2.5GB) for RV32.
+ * - 0x4000000000 ( 256GB) for RV64 using SV39 mmu
+ * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
+ * - 0x100000000000000 ( 64PB) for RV64 using SV57 mmu
+ *
+ * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
+ * Instruction Set Manual Volume II: Privileged Architecture" states that
+ * "load and store effective addresses, which are 64bits, must have bits
+ * 63–48 all equal to bit 47, or else a page-fault exception will occur."
+ * Similarly for SV57, bits 63–57 must be equal to bit 56.
+ */
+#ifdef CONFIG_64BIT
+#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
+#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
+
+#ifdef CONFIG_COMPAT
+#define TASK_SIZE_32 (_AC(0x80000000, UL))
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#else
+#define TASK_SIZE TASK_SIZE_64
+#endif
+
+#else
+#define TASK_SIZE FIXADDR_START
+#define TASK_SIZE_MIN TASK_SIZE
+#endif
+
+#else /* CONFIG_MMU */
+
+#define PAGE_SHARED __pgprot(0)
+#define PAGE_KERNEL __pgprot(0)
+#define swapper_pg_dir NULL
+#define TASK_SIZE 0xffffffffUL
+#define VMALLOC_START _AC(0, UL)
+#define VMALLOC_END TASK_SIZE
+
+#endif /* !CONFIG_MMU */
+
+extern char _start[];
+extern void *_dtb_early_va;
+extern uintptr_t _dtb_early_pa;
+#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
+#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va))
+#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
+#else
+#define dtb_early_va _dtb_early_va
+#define dtb_early_pa _dtb_early_pa
+#endif /* CONFIG_XIP_KERNEL */
+extern u64 satp_mode;
+
+void paging_init(void);
+void misc_mem_init(void);
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero,
+ * used for zero-mapped memory areas, etc.
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PGTABLE_H */
diff --git a/riscv/include/asm/probes.h b/riscv/include/asm/probes.h
new file mode 100644
index 0000000..a787e6d
--- /dev/null
+++ b/riscv/include/asm/probes.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_PROBES_H
+#define _ASM_RISCV_PROBES_H
+
+typedef u32 probe_opcode_t;
+typedef bool (probes_handler_t) (u32 opcode, unsigned long addr, struct pt_regs *);
+
+/* architecture specific copy of original instruction */
+struct arch_probe_insn {
+ probe_opcode_t *insn;
+ probes_handler_t *handler;
+ /* restore address after simulation */
+ unsigned long restore;
+};
+
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+ struct arch_probe_insn api;
+};
+#endif
+
+#endif /* _ASM_RISCV_PROBES_H */
diff --git a/riscv/include/asm/processor.h b/riscv/include/asm/processor.h
new file mode 100644
index 0000000..e1944ff
--- /dev/null
+++ b/riscv/include/asm/processor.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PROCESSOR_H
+#define _ASM_RISCV_PROCESSOR_H
+
+#include <linux/const.h>
+#include <linux/cache.h>
+#include <linux/prctl.h>
+
+#include <vdso/processor.h>
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_64BIT
+#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
+#define STACK_TOP_MAX TASK_SIZE
+
+#define arch_get_mmap_end(addr, len, flags) \
+({ \
+ unsigned long mmap_end; \
+ typeof(addr) _addr = (addr); \
+ if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+ mmap_end = STACK_TOP_MAX; \
+ else if ((_addr) >= VA_USER_SV57) \
+ mmap_end = STACK_TOP_MAX; \
+ else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
+ mmap_end = VA_USER_SV48; \
+ else \
+ mmap_end = VA_USER_SV39; \
+ mmap_end; \
+})
+
+#define arch_get_mmap_base(addr, base) \
+({ \
+ unsigned long mmap_base; \
+ typeof(addr) _addr = (addr); \
+ typeof(base) _base = (base); \
+ unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \
+ if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+ mmap_base = (_base); \
+ else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \
+ mmap_base = VA_USER_SV57 - rnd_gap; \
+ else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
+ mmap_base = VA_USER_SV48 - rnd_gap; \
+ else \
+ mmap_base = VA_USER_SV39 - rnd_gap; \
+ mmap_base; \
+})
+
+#else
+#define DEFAULT_MAP_WINDOW TASK_SIZE
+#define STACK_TOP_MAX TASK_SIZE
+#endif
+#define STACK_ALIGN 16
+
+#define STACK_TOP DEFAULT_MAP_WINDOW
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#ifdef CONFIG_64BIT
+#define TASK_UNMAPPED_BASE PAGE_ALIGN((UL(1) << MMAP_MIN_VA_BITS) / 3)
+#else
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+#endif
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+struct pt_regs;
+
+/* CPU-specific state of a task */
+struct thread_struct {
+ /* Callee-saved registers */
+ unsigned long ra;
+ unsigned long sp; /* Kernel mode stack */
+ unsigned long s[12]; /* s[0]: frame pointer */
+ struct __riscv_d_ext_state fstate;
+ unsigned long bad_cause;
+ unsigned long vstate_ctrl;
+ struct __riscv_v_ext_state vstate;
+ unsigned long align_ctl;
+};
+
+/* Whitelist the fstate from the task_struct for hardened usercopy */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+ unsigned long *size)
+{
+ *offset = offsetof(struct thread_struct, fstate);
+ *size = sizeof_field(struct thread_struct, fstate);
+}
+
+#define INIT_THREAD { \
+ .sp = sizeof(init_stack) + (long)&init_stack, \
+ .align_ctl = PR_UNALIGN_NOPRINT, \
+}
+
+#define task_pt_regs(tsk) \
+ ((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \
+ - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->epc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
+
+
+/* Do necessary setup to start up a newly executed thread. */
+extern void start_thread(struct pt_regs *regs,
+ unsigned long pc, unsigned long sp);
+
+extern unsigned long __get_wchan(struct task_struct *p);
+
+
+static inline void wait_for_interrupt(void)
+{
+ __asm__ __volatile__ ("wfi");
+}
+
+extern phys_addr_t dma32_phys_limit;
+
+struct device_node;
+int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid);
+int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hartid);
+int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid);
+
+extern void riscv_fill_hwcap(void);
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+
+extern unsigned long signal_minsigstksz __ro_after_init;
+
+#ifdef CONFIG_RISCV_ISA_V
+/* Userspace interface for PR_RISCV_V_{SET,GET}_VS prctl()s: */
+#define RISCV_V_SET_CONTROL(arg) riscv_v_vstate_ctrl_set_current(arg)
+#define RISCV_V_GET_CONTROL() riscv_v_vstate_ctrl_get_current()
+extern long riscv_v_vstate_ctrl_set_current(unsigned long arg);
+extern long riscv_v_vstate_ctrl_get_current(void);
+#endif /* CONFIG_RISCV_ISA_V */
+
+extern int get_unalign_ctl(struct task_struct *tsk, unsigned long addr);
+extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+
+#define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr))
+#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/riscv/include/asm/ptdump.h b/riscv/include/asm/ptdump.h
new file mode 100644
index 0000000..3c9ea6d
--- /dev/null
+++ b/riscv/include/asm/ptdump.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#ifndef _ASM_RISCV_PTDUMP_H
+#define _ASM_RISCV_PTDUMP_H
+
+void ptdump_check_wx(void);
+
+#ifdef CONFIG_DEBUG_WX
+static inline void debug_checkwx(void)
+{
+ ptdump_check_wx();
+}
+#else
+static inline void debug_checkwx(void)
+{
+}
+#endif
+
+#endif /* _ASM_RISCV_PTDUMP_H */
diff --git a/riscv/include/asm/ptrace.h b/riscv/include/asm/ptrace.h
new file mode 100644
index 0000000..b5b0adc
--- /dev/null
+++ b/riscv/include/asm/ptrace.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PTRACE_H
+#define _ASM_RISCV_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+#include <asm/csr.h>
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long epc;
+ unsigned long ra;
+ unsigned long sp;
+ unsigned long gp;
+ unsigned long tp;
+ unsigned long t0;
+ unsigned long t1;
+ unsigned long t2;
+ unsigned long s0;
+ unsigned long s1;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long s2;
+ unsigned long s3;
+ unsigned long s4;
+ unsigned long s5;
+ unsigned long s6;
+ unsigned long s7;
+ unsigned long s8;
+ unsigned long s9;
+ unsigned long s10;
+ unsigned long s11;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+ /* Supervisor/Machine CSRs */
+ unsigned long status;
+ unsigned long badaddr;
+ unsigned long cause;
+ /* a0 value before the syscall */
+ unsigned long orig_a0;
+};
+
+#define PTRACE_SYSEMU 0x1f
+#define PTRACE_SYSEMU_SINGLESTEP 0x20
+
+#ifdef CONFIG_64BIT
+#define REG_FMT "%016lx"
+#else
+#define REG_FMT "%08lx"
+#endif
+
+#define user_mode(regs) (((regs)->status & SR_PP) == 0)
+
+#define MAX_REG_OFFSET offsetof(struct pt_regs, orig_a0)
+
+/* Helpers for working with the instruction pointer */
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+ return regs->epc;
+}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->epc = val;
+}
+
+#define profile_pc(regs) instruction_pointer(regs)
+
+/* Helpers for working with the user stack pointer */
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->sp = val;
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+
+/* Helpers for working with the frame pointer */
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+ return regs->s0;
+}
+static inline void frame_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->s0 = val;
+}
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+static inline void regs_set_return_value(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->a0 = val;
+}
+
+extern int regs_query_register_offset(const char *name);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten
+ * @offset: offset of the register.
+ *
+ * regs_get_register returns the value of a register whose offset from @regs.
+ * The @offset is the offset of the register in struct pt_regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_get_kernel_argument() - get Nth function argument in kernel
+ * @regs: pt_regs of that context
+ * @n: function argument number (start from 0)
+ *
+ * regs_get_argument() returns @n th argument of the function call.
+ *
+ * Note you can get the parameter correctly if the function has no
+ * more than eight arguments.
+ */
+static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
+ unsigned int n)
+{
+ static const int nr_reg_arguments = 8;
+ static const unsigned int argument_offs[] = {
+ offsetof(struct pt_regs, a0),
+ offsetof(struct pt_regs, a1),
+ offsetof(struct pt_regs, a2),
+ offsetof(struct pt_regs, a3),
+ offsetof(struct pt_regs, a4),
+ offsetof(struct pt_regs, a5),
+ offsetof(struct pt_regs, a6),
+ offsetof(struct pt_regs, a7),
+ };
+
+ if (n < nr_reg_arguments)
+ return regs_get_register(regs, argument_offs[n]);
+ return 0;
+}
+
+static inline int regs_irqs_disabled(struct pt_regs *regs)
+{
+ return !(regs->status & SR_PIE);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PTRACE_H */
diff --git a/riscv/include/asm/sbi.h b/riscv/include/asm/sbi.h
new file mode 100644
index 0000000..0892f44
--- /dev/null
+++ b/riscv/include/asm/sbi.h
@@ -0,0 +1,347 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_SBI_H
+#define _ASM_RISCV_SBI_H
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_RISCV_SBI
+enum sbi_ext_id {
+#ifdef CONFIG_RISCV_SBI_V01
+ SBI_EXT_0_1_SET_TIMER = 0x0,
+ SBI_EXT_0_1_CONSOLE_PUTCHAR = 0x1,
+ SBI_EXT_0_1_CONSOLE_GETCHAR = 0x2,
+ SBI_EXT_0_1_CLEAR_IPI = 0x3,
+ SBI_EXT_0_1_SEND_IPI = 0x4,
+ SBI_EXT_0_1_REMOTE_FENCE_I = 0x5,
+ SBI_EXT_0_1_REMOTE_SFENCE_VMA = 0x6,
+ SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID = 0x7,
+ SBI_EXT_0_1_SHUTDOWN = 0x8,
+#endif
+ SBI_EXT_BASE = 0x10,
+ SBI_EXT_TIME = 0x54494D45,
+ SBI_EXT_IPI = 0x735049,
+ SBI_EXT_RFENCE = 0x52464E43,
+ SBI_EXT_HSM = 0x48534D,
+ SBI_EXT_SRST = 0x53525354,
+ SBI_EXT_PMU = 0x504D55,
+ SBI_EXT_DBCN = 0x4442434E,
+
+ /* Experimentals extensions must lie within this range */
+ SBI_EXT_EXPERIMENTAL_START = 0x08000000,
+ SBI_EXT_EXPERIMENTAL_END = 0x08FFFFFF,
+
+ /* Vendor extensions must lie within this range */
+ SBI_EXT_VENDOR_START = 0x09000000,
+ SBI_EXT_VENDOR_END = 0x09FFFFFF,
+};
+
+enum sbi_ext_base_fid {
+ SBI_EXT_BASE_GET_SPEC_VERSION = 0,
+ SBI_EXT_BASE_GET_IMP_ID,
+ SBI_EXT_BASE_GET_IMP_VERSION,
+ SBI_EXT_BASE_PROBE_EXT,
+ SBI_EXT_BASE_GET_MVENDORID,
+ SBI_EXT_BASE_GET_MARCHID,
+ SBI_EXT_BASE_GET_MIMPID,
+};
+
+enum sbi_ext_time_fid {
+ SBI_EXT_TIME_SET_TIMER = 0,
+};
+
+enum sbi_ext_ipi_fid {
+ SBI_EXT_IPI_SEND_IPI = 0,
+};
+
+enum sbi_ext_rfence_fid {
+ SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
+ SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
+ SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
+};
+
+enum sbi_ext_hsm_fid {
+ SBI_EXT_HSM_HART_START = 0,
+ SBI_EXT_HSM_HART_STOP,
+ SBI_EXT_HSM_HART_STATUS,
+ SBI_EXT_HSM_HART_SUSPEND,
+};
+
+enum sbi_hsm_hart_state {
+ SBI_HSM_STATE_STARTED = 0,
+ SBI_HSM_STATE_STOPPED,
+ SBI_HSM_STATE_START_PENDING,
+ SBI_HSM_STATE_STOP_PENDING,
+ SBI_HSM_STATE_SUSPENDED,
+ SBI_HSM_STATE_SUSPEND_PENDING,
+ SBI_HSM_STATE_RESUME_PENDING,
+};
+
+#define SBI_HSM_SUSP_BASE_MASK 0x7fffffff
+#define SBI_HSM_SUSP_NON_RET_BIT 0x80000000
+#define SBI_HSM_SUSP_PLAT_BASE 0x10000000
+
+#define SBI_HSM_SUSPEND_RET_DEFAULT 0x00000000
+#define SBI_HSM_SUSPEND_RET_PLATFORM SBI_HSM_SUSP_PLAT_BASE
+#define SBI_HSM_SUSPEND_RET_LAST SBI_HSM_SUSP_BASE_MASK
+#define SBI_HSM_SUSPEND_NON_RET_DEFAULT SBI_HSM_SUSP_NON_RET_BIT
+#define SBI_HSM_SUSPEND_NON_RET_PLATFORM (SBI_HSM_SUSP_NON_RET_BIT | \
+ SBI_HSM_SUSP_PLAT_BASE)
+#define SBI_HSM_SUSPEND_NON_RET_LAST (SBI_HSM_SUSP_NON_RET_BIT | \
+ SBI_HSM_SUSP_BASE_MASK)
+
+enum sbi_ext_srst_fid {
+ SBI_EXT_SRST_RESET = 0,
+};
+
+enum sbi_srst_reset_type {
+ SBI_SRST_RESET_TYPE_SHUTDOWN = 0,
+ SBI_SRST_RESET_TYPE_COLD_REBOOT,
+ SBI_SRST_RESET_TYPE_WARM_REBOOT,
+};
+
+enum sbi_srst_reset_reason {
+ SBI_SRST_RESET_REASON_NONE = 0,
+ SBI_SRST_RESET_REASON_SYS_FAILURE,
+};
+
+enum sbi_ext_pmu_fid {
+ SBI_EXT_PMU_NUM_COUNTERS = 0,
+ SBI_EXT_PMU_COUNTER_GET_INFO,
+ SBI_EXT_PMU_COUNTER_CFG_MATCH,
+ SBI_EXT_PMU_COUNTER_START,
+ SBI_EXT_PMU_COUNTER_STOP,
+ SBI_EXT_PMU_COUNTER_FW_READ,
+};
+
+union sbi_pmu_ctr_info {
+ unsigned long value;
+ struct {
+ unsigned long csr:12;
+ unsigned long width:6;
+#if __riscv_xlen == 32
+ unsigned long reserved:13;
+#else
+ unsigned long reserved:45;
+#endif
+ unsigned long type:1;
+ };
+};
+
+#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
+#define RISCV_PMU_RAW_EVENT_IDX 0x20000
+
+/** General pmu event codes specified in SBI PMU extension */
+enum sbi_pmu_hw_generic_events_t {
+ SBI_PMU_HW_NO_EVENT = 0,
+ SBI_PMU_HW_CPU_CYCLES = 1,
+ SBI_PMU_HW_INSTRUCTIONS = 2,
+ SBI_PMU_HW_CACHE_REFERENCES = 3,
+ SBI_PMU_HW_CACHE_MISSES = 4,
+ SBI_PMU_HW_BRANCH_INSTRUCTIONS = 5,
+ SBI_PMU_HW_BRANCH_MISSES = 6,
+ SBI_PMU_HW_BUS_CYCLES = 7,
+ SBI_PMU_HW_STALLED_CYCLES_FRONTEND = 8,
+ SBI_PMU_HW_STALLED_CYCLES_BACKEND = 9,
+ SBI_PMU_HW_REF_CPU_CYCLES = 10,
+
+ SBI_PMU_HW_GENERAL_MAX,
+};
+
+/**
+ * Special "firmware" events provided by the firmware, even if the hardware
+ * does not support performance events. These events are encoded as a raw
+ * event type in Linux kernel perf framework.
+ */
+enum sbi_pmu_fw_generic_events_t {
+ SBI_PMU_FW_MISALIGNED_LOAD = 0,
+ SBI_PMU_FW_MISALIGNED_STORE = 1,
+ SBI_PMU_FW_ACCESS_LOAD = 2,
+ SBI_PMU_FW_ACCESS_STORE = 3,
+ SBI_PMU_FW_ILLEGAL_INSN = 4,
+ SBI_PMU_FW_SET_TIMER = 5,
+ SBI_PMU_FW_IPI_SENT = 6,
+ SBI_PMU_FW_IPI_RCVD = 7,
+ SBI_PMU_FW_FENCE_I_SENT = 8,
+ SBI_PMU_FW_FENCE_I_RCVD = 9,
+ SBI_PMU_FW_SFENCE_VMA_SENT = 10,
+ SBI_PMU_FW_SFENCE_VMA_RCVD = 11,
+ SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12,
+ SBI_PMU_FW_SFENCE_VMA_ASID_RCVD = 13,
+
+ SBI_PMU_FW_HFENCE_GVMA_SENT = 14,
+ SBI_PMU_FW_HFENCE_GVMA_RCVD = 15,
+ SBI_PMU_FW_HFENCE_GVMA_VMID_SENT = 16,
+ SBI_PMU_FW_HFENCE_GVMA_VMID_RCVD = 17,
+
+ SBI_PMU_FW_HFENCE_VVMA_SENT = 18,
+ SBI_PMU_FW_HFENCE_VVMA_RCVD = 19,
+ SBI_PMU_FW_HFENCE_VVMA_ASID_SENT = 20,
+ SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD = 21,
+ SBI_PMU_FW_MAX,
+};
+
+/* SBI PMU event types */
+enum sbi_pmu_event_type {
+ SBI_PMU_EVENT_TYPE_HW = 0x0,
+ SBI_PMU_EVENT_TYPE_CACHE = 0x1,
+ SBI_PMU_EVENT_TYPE_RAW = 0x2,
+ SBI_PMU_EVENT_TYPE_FW = 0xf,
+};
+
+/* SBI PMU event types */
+enum sbi_pmu_ctr_type {
+ SBI_PMU_CTR_TYPE_HW = 0x0,
+ SBI_PMU_CTR_TYPE_FW,
+};
+
+/* Helper macros to decode event idx */
+#define SBI_PMU_EVENT_IDX_OFFSET 20
+#define SBI_PMU_EVENT_IDX_MASK 0xFFFFF
+#define SBI_PMU_EVENT_IDX_CODE_MASK 0xFFFF
+#define SBI_PMU_EVENT_IDX_TYPE_MASK 0xF0000
+#define SBI_PMU_EVENT_RAW_IDX 0x20000
+#define SBI_PMU_FIXED_CTR_MASK 0x07
+
+#define SBI_PMU_EVENT_CACHE_ID_CODE_MASK 0xFFF8
+#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK 0x06
+#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK 0x01
+
+#define SBI_PMU_EVENT_CACHE_ID_SHIFT 3
+#define SBI_PMU_EVENT_CACHE_OP_SHIFT 1
+
+#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF
+
+/* Flags defined for config matching function */
+#define SBI_PMU_CFG_FLAG_SKIP_MATCH (1 << 0)
+#define SBI_PMU_CFG_FLAG_CLEAR_VALUE (1 << 1)
+#define SBI_PMU_CFG_FLAG_AUTO_START (1 << 2)
+#define SBI_PMU_CFG_FLAG_SET_VUINH (1 << 3)
+#define SBI_PMU_CFG_FLAG_SET_VSINH (1 << 4)
+#define SBI_PMU_CFG_FLAG_SET_UINH (1 << 5)
+#define SBI_PMU_CFG_FLAG_SET_SINH (1 << 6)
+#define SBI_PMU_CFG_FLAG_SET_MINH (1 << 7)
+
+/* Flags defined for counter start function */
+#define SBI_PMU_START_FLAG_SET_INIT_VALUE (1 << 0)
+
+/* Flags defined for counter stop function */
+#define SBI_PMU_STOP_FLAG_RESET (1 << 0)
+
+enum sbi_ext_dbcn_fid {
+ SBI_EXT_DBCN_CONSOLE_WRITE = 0,
+ SBI_EXT_DBCN_CONSOLE_READ = 1,
+ SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2,
+};
+
+#define SBI_SPEC_VERSION_DEFAULT 0x1
+#define SBI_SPEC_VERSION_MAJOR_SHIFT 24
+#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f
+#define SBI_SPEC_VERSION_MINOR_MASK 0xffffff
+
+/* SBI return error codes */
+#define SBI_SUCCESS 0
+#define SBI_ERR_FAILURE -1
+#define SBI_ERR_NOT_SUPPORTED -2
+#define SBI_ERR_INVALID_PARAM -3
+#define SBI_ERR_DENIED -4
+#define SBI_ERR_INVALID_ADDRESS -5
+#define SBI_ERR_ALREADY_AVAILABLE -6
+#define SBI_ERR_ALREADY_STARTED -7
+#define SBI_ERR_ALREADY_STOPPED -8
+
+extern unsigned long sbi_spec_version;
+struct sbiret {
+ long error;
+ long value;
+};
+
+void sbi_init(void);
+struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ unsigned long arg5);
+
+void sbi_console_putchar(int ch);
+int sbi_console_getchar(void);
+long sbi_get_mvendorid(void);
+long sbi_get_marchid(void);
+long sbi_get_mimpid(void);
+void sbi_set_timer(uint64_t stime_value);
+void sbi_shutdown(void);
+void sbi_send_ipi(unsigned int cpu);
+int sbi_remote_fence_i(const struct cpumask *cpu_mask);
+
+int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long asid);
+int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
+ unsigned long start,
+ unsigned long size);
+int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long vmid);
+int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
+ unsigned long start,
+ unsigned long size);
+int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long asid);
+long sbi_probe_extension(int ext);
+
+/* Check if current SBI specification version is 0.1 or not */
+static inline int sbi_spec_is_0_1(void)
+{
+ return (sbi_spec_version == SBI_SPEC_VERSION_DEFAULT) ? 1 : 0;
+}
+
+/* Get the major version of SBI */
+static inline unsigned long sbi_major_version(void)
+{
+ return (sbi_spec_version >> SBI_SPEC_VERSION_MAJOR_SHIFT) &
+ SBI_SPEC_VERSION_MAJOR_MASK;
+}
+
+/* Get the minor version of SBI */
+static inline unsigned long sbi_minor_version(void)
+{
+ return sbi_spec_version & SBI_SPEC_VERSION_MINOR_MASK;
+}
+
+/* Make SBI version */
+static inline unsigned long sbi_mk_version(unsigned long major,
+ unsigned long minor)
+{
+ return ((major & SBI_SPEC_VERSION_MAJOR_MASK) <<
+ SBI_SPEC_VERSION_MAJOR_SHIFT) | minor;
+}
+
+int sbi_err_map_linux_errno(int err);
+#else /* CONFIG_RISCV_SBI */
+static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
+static inline void sbi_init(void) {}
+#endif /* CONFIG_RISCV_SBI */
+
+unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
+unsigned long riscv_cached_marchid(unsigned int cpu_id);
+unsigned long riscv_cached_mimpid(unsigned int cpu_id);
+
+#if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI)
+void sbi_ipi_init(void);
+#else
+static inline void sbi_ipi_init(void) { }
+#endif
+
+#endif /* _ASM_RISCV_SBI_H */
diff --git a/riscv/include/asm/scs.h b/riscv/include/asm/scs.h
new file mode 100644
index 0000000..0e45db7
--- /dev/null
+++ b/riscv/include/asm/scs.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SCS_H
+#define _ASM_SCS_H
+
+#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+
+/* Load init_shadow_call_stack to gp. */
+.macro scs_load_init_stack
+ la gp, init_shadow_call_stack
+ XIP_FIXUP_OFFSET gp
+.endm
+
+/* Load the per-CPU IRQ shadow call stack to gp. */
+.macro scs_load_irq_stack tmp
+ load_per_cpu gp, irq_shadow_call_stack_ptr, \tmp
+.endm
+
+/* Load task_scs_sp(current) to gp. */
+.macro scs_load_current
+ REG_L gp, TASK_TI_SCS_SP(tp)
+.endm
+
+/* Load task_scs_sp(current) to gp, but only if tp has changed. */
+.macro scs_load_current_if_task_changed prev
+ beq \prev, tp, _skip_scs
+ scs_load_current
+_skip_scs:
+.endm
+
+/* Save gp to task_scs_sp(current). */
+.macro scs_save_current
+ REG_S gp, TASK_TI_SCS_SP(tp)
+.endm
+
+#else /* CONFIG_SHADOW_CALL_STACK */
+
+.macro scs_load_init_stack
+.endm
+.macro scs_load_irq_stack tmp
+.endm
+.macro scs_load_current
+.endm
+.macro scs_load_current_if_task_changed prev
+.endm
+.macro scs_save_current
+.endm
+
+#endif /* CONFIG_SHADOW_CALL_STACK */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SCS_H */
diff --git a/riscv/include/asm/scs.h~ b/riscv/include/asm/scs.h~
new file mode 100644
index 0000000..0e45db7
--- /dev/null
+++ b/riscv/include/asm/scs.h~
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SCS_H
+#define _ASM_SCS_H
+
+#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+
+/* Load init_shadow_call_stack to gp. */
+.macro scs_load_init_stack
+ la gp, init_shadow_call_stack
+ XIP_FIXUP_OFFSET gp
+.endm
+
+/* Load the per-CPU IRQ shadow call stack to gp. */
+.macro scs_load_irq_stack tmp
+ load_per_cpu gp, irq_shadow_call_stack_ptr, \tmp
+.endm
+
+/* Load task_scs_sp(current) to gp. */
+.macro scs_load_current
+ REG_L gp, TASK_TI_SCS_SP(tp)
+.endm
+
+/* Load task_scs_sp(current) to gp, but only if tp has changed. */
+.macro scs_load_current_if_task_changed prev
+ beq \prev, tp, _skip_scs
+ scs_load_current
+_skip_scs:
+.endm
+
+/* Save gp to task_scs_sp(current). */
+.macro scs_save_current
+ REG_S gp, TASK_TI_SCS_SP(tp)
+.endm
+
+#else /* CONFIG_SHADOW_CALL_STACK */
+
+.macro scs_load_init_stack
+.endm
+.macro scs_load_irq_stack tmp
+.endm
+.macro scs_load_current
+.endm
+.macro scs_load_current_if_task_changed prev
+.endm
+.macro scs_save_current
+.endm
+
+#endif /* CONFIG_SHADOW_CALL_STACK */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SCS_H */
diff --git a/riscv/include/asm/seccomp.h b/riscv/include/asm/seccomp.h
new file mode 100644
index 0000000..c7ee6a3
--- /dev/null
+++ b/riscv/include/asm/seccomp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#include <asm-generic/seccomp.h>
+
+#ifdef CONFIG_64BIT
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_RISCV64
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "riscv64"
+#else /* !CONFIG_64BIT */
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_RISCV32
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "riscv32"
+#endif
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/riscv/include/asm/sections.h b/riscv/include/asm/sections.h
new file mode 100644
index 0000000..a393d50
--- /dev/null
+++ b/riscv/include/asm/sections.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef __ASM_SECTIONS_H
+#define __ASM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+#include <linux/mm.h>
+
+extern char _start[];
+extern char _start_kernel[];
+extern char __init_data_begin[], __init_data_end[];
+extern char __init_text_begin[], __init_text_end[];
+extern char __alt_start[], __alt_end[];
+extern char __exittext_begin[], __exittext_end[];
+
+static inline bool is_va_kernel_text(uintptr_t va)
+{
+ uintptr_t start = (uintptr_t)_start;
+ uintptr_t end = (uintptr_t)__init_data_begin;
+
+ return va >= start && va < end;
+}
+
+static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
+{
+ uintptr_t start = (uintptr_t)lm_alias(_start);
+ uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
+
+ return va >= start && va < end;
+}
+
+#endif /* __ASM_SECTIONS_H */
diff --git a/riscv/include/asm/semihost.h b/riscv/include/asm/semihost.h
new file mode 100644
index 0000000..557a349
--- /dev/null
+++ b/riscv/include/asm/semihost.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 tinylab.org
+ * Author: Bin Meng <bmeng@tinylab.org>
+ */
+
+#ifndef _RISCV_SEMIHOST_H_
+#define _RISCV_SEMIHOST_H_
+
+struct uart_port;
+
+static inline void smh_putc(struct uart_port *port, unsigned char c)
+{
+ asm volatile("addi a1, %0, 0\n"
+ "addi a0, zero, 3\n"
+ ".balign 16\n"
+ ".option push\n"
+ ".option norvc\n"
+ "slli zero, zero, 0x1f\n"
+ "ebreak\n"
+ "srai zero, zero, 0x7\n"
+ ".option pop\n"
+ : : "r" (&c) : "a0", "a1", "memory");
+}
+
+#endif /* _RISCV_SEMIHOST_H_ */
diff --git a/riscv/include/asm/set_memory.h b/riscv/include/asm/set_memory.h
new file mode 100644
index 0000000..ec11001
--- /dev/null
+++ b/riscv/include/asm/set_memory.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#ifndef _ASM_RISCV_SET_MEMORY_H
+#define _ASM_RISCV_SET_MEMORY_H
+
+#ifndef __ASSEMBLY__
+/*
+ * Functions to change memory attributes.
+ */
+#ifdef CONFIG_MMU
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_rw_nx(unsigned long addr, int numpages);
+static __always_inline int set_kernel_memory(char *startp, char *endp,
+ int (*set_memory)(unsigned long start,
+ int num_pages))
+{
+ unsigned long start = (unsigned long)startp;
+ unsigned long end = (unsigned long)endp;
+ int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;
+
+ return set_memory(start, num_pages);
+}
+#else
+static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
+static inline int set_kernel_memory(char *startp, char *endp,
+ int (*set_memory)(unsigned long start,
+ int num_pages))
+{
+ return 0;
+}
+#endif
+
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+#ifdef CONFIG_64BIT
+#define SECTION_ALIGN (1 << 21)
+#else
+#define SECTION_ALIGN (1 << 22)
+#endif
+#else /* !CONFIG_STRICT_KERNEL_RWX */
+#define SECTION_ALIGN L1_CACHE_BYTES
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
+#define PECOFF_SECTION_ALIGNMENT 0x1000
+#define PECOFF_FILE_ALIGNMENT 0x200
+
+#endif /* _ASM_RISCV_SET_MEMORY_H */
diff --git a/riscv/include/asm/signal.h b/riscv/include/asm/signal.h
new file mode 100644
index 0000000..956ae0a
--- /dev/null
+++ b/riscv/include/asm/signal.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SIGNAL_H
+#define __ASM_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+asmlinkage __visible
+void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags);
+
+#endif
diff --git a/riscv/include/asm/signal32.h b/riscv/include/asm/signal32.h
new file mode 100644
index 0000000..96dc569
--- /dev/null
+++ b/riscv/include/asm/signal32.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SIGNAL32_H
+#define __ASM_SIGNAL32_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs);
+#else
+static inline
+int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ return -1;
+}
+#endif
+
+#endif
diff --git a/riscv/include/asm/smp.h b/riscv/include/asm/smp.h
new file mode 100644
index 0000000..0d55584
--- /dev/null
+++ b/riscv/include/asm/smp.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_SMP_H
+#define _ASM_RISCV_SMP_H
+
+#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
+#include <linux/thread_info.h>
+
+#define INVALID_HARTID ULONG_MAX
+
+struct seq_file;
+extern unsigned long boot_cpu_hartid;
+
+#ifdef CONFIG_SMP
+
+#include <linux/jump_label.h>
+
+/*
+ * Mapping between linux logical cpu index and hartid.
+ */
+extern unsigned long __cpuid_to_hartid_map[NR_CPUS];
+#define cpuid_to_hartid_map(cpu) __cpuid_to_hartid_map[cpu]
+
+/* print IPI stats */
+void show_ipi_stats(struct seq_file *p, int prec);
+
+/* SMP initialization hook for setup_arch */
+void __init setup_smp(void);
+
+/* Hook for the generic smp_call_function_many() routine. */
+void arch_send_call_function_ipi_mask(struct cpumask *mask);
+
+/* Hook for the generic smp_call_function_single() routine. */
+void arch_send_call_function_single_ipi(int cpu);
+
+int riscv_hartid_to_cpuid(unsigned long hartid);
+
+/* Enable IPI for CPU hotplug */
+void riscv_ipi_enable(void);
+
+/* Disable IPI for CPU hotplug */
+void riscv_ipi_disable(void);
+
+/* Check if IPI interrupt numbers are available */
+bool riscv_ipi_have_virq_range(void);
+
+/* Set the IPI interrupt numbers for arch (called by irqchip drivers) */
+void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence);
+
+/* Check if we can use IPIs for remote FENCEs */
+DECLARE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
+#define riscv_use_ipi_for_rfence() \
+ static_branch_unlikely(&riscv_ipi_for_rfence)
+
+/* Check other CPUs stop or not */
+bool smp_crash_stop_failed(void);
+
+/* Secondary hart entry */
+asmlinkage void smp_callin(void);
+
+/*
+ * Obtains the hart ID of the currently executing task. This relies on
+ * THREAD_INFO_IN_TASK, but we define that unconditionally.
+ */
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+#if defined CONFIG_HOTPLUG_CPU
+int __cpu_disable(void);
+static inline void __cpu_die(unsigned int cpu) { }
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#else
+
+static inline void show_ipi_stats(struct seq_file *p, int prec)
+{
+}
+
+static inline int riscv_hartid_to_cpuid(unsigned long hartid)
+{
+ if (hartid == boot_cpu_hartid)
+ return 0;
+
+ return -1;
+}
+static inline unsigned long cpuid_to_hartid_map(int cpu)
+{
+ return boot_cpu_hartid;
+}
+
+static inline void riscv_ipi_enable(void)
+{
+}
+
+static inline void riscv_ipi_disable(void)
+{
+}
+
+static inline bool riscv_ipi_have_virq_range(void)
+{
+ return false;
+}
+
+static inline void riscv_ipi_set_virq_range(int virq, int nr,
+ bool use_for_rfence)
+{
+}
+
+static inline bool riscv_use_ipi_for_rfence(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_SMP */
+
+#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
+bool cpu_has_hotplug(unsigned int cpu);
+#else
+static inline bool cpu_has_hotplug(unsigned int cpu)
+{
+ return false;
+}
+#endif
+
+#endif /* _ASM_RISCV_SMP_H */
diff --git a/riscv/include/asm/soc.h b/riscv/include/asm/soc.h
new file mode 100644
index 0000000..f494066
--- /dev/null
+++ b/riscv/include/asm/soc.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef _ASM_RISCV_SOC_H
+#define _ASM_RISCV_SOC_H
+
+#include <linux/of.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+#define SOC_EARLY_INIT_DECLARE(name, compat, fn) \
+ static const struct of_device_id __soc_early_init__##name \
+ __used __section("__soc_early_init_table") \
+ = { .compatible = compat, .data = fn }
+
+void soc_early_init(void);
+
+extern unsigned long __soc_early_init_table_start;
+extern unsigned long __soc_early_init_table_end;
+
+#endif
diff --git a/riscv/include/asm/sparsemem.h b/riscv/include/asm/sparsemem.h
new file mode 100644
index 0000000..63acaec
--- /dev/null
+++ b/riscv/include/asm/sparsemem.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_SPARSEMEM_H
+#define _ASM_RISCV_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+#ifdef CONFIG_64BIT
+#define MAX_PHYSMEM_BITS 56
+#else
+#define MAX_PHYSMEM_BITS 34
+#endif /* CONFIG_64BIT */
+#define SECTION_SIZE_BITS 27
+#endif /* CONFIG_SPARSEMEM */
+
+#endif /* _ASM_RISCV_SPARSEMEM_H */
diff --git a/riscv/include/asm/stackprotector.h b/riscv/include/asm/stackprotector.h
new file mode 100644
index 0000000..43895b9
--- /dev/null
+++ b/riscv/include/asm/stackprotector.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_STACKPROTECTOR_H
+#define _ASM_RISCV_STACKPROTECTOR_H
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary = get_random_canary();
+
+ current->stack_canary = canary;
+ if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK))
+ __stack_chk_guard = current->stack_canary;
+}
+#endif /* _ASM_RISCV_STACKPROTECTOR_H */
diff --git a/riscv/include/asm/stacktrace.h b/riscv/include/asm/stacktrace.h
new file mode 100644
index 0000000..b1495a7
--- /dev/null
+++ b/riscv/include/asm/stacktrace.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_STACKTRACE_H
+#define _ASM_RISCV_STACKTRACE_H
+
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long ra;
+};
+
+extern void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ bool (*fn)(void *, unsigned long), void *arg);
+extern void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+ const char *loglvl);
+
+static inline bool on_thread_stack(void)
+{
+ return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+}
+
+
+#ifdef CONFIG_VMAP_STACK
+DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
+#endif /* CONFIG_VMAP_STACK */
+
+#endif /* _ASM_RISCV_STACKTRACE_H */
diff --git a/riscv/include/asm/string.h b/riscv/include/asm/string.h
new file mode 100644
index 0000000..a96b1fe
--- /dev/null
+++ b/riscv/include/asm/string.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_STRING_H
+#define _ASM_RISCV_STRING_H
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+
+#define __HAVE_ARCH_MEMSET
+extern asmlinkage void *memset(void *, int, size_t);
+extern asmlinkage void *__memset(void *, int, size_t);
+#define __HAVE_ARCH_MEMCPY
+extern asmlinkage void *memcpy(void *, const void *, size_t);
+extern asmlinkage void *__memcpy(void *, const void *, size_t);
+#define __HAVE_ARCH_MEMMOVE
+extern asmlinkage void *memmove(void *, const void *, size_t);
+extern asmlinkage void *__memmove(void *, const void *, size_t);
+
+#define __HAVE_ARCH_STRCMP
+extern asmlinkage int strcmp(const char *cs, const char *ct);
+
+#define __HAVE_ARCH_STRLEN
+extern asmlinkage __kernel_size_t strlen(const char *);
+
+#define __HAVE_ARCH_STRNCMP
+extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count);
+
+/* For those files which don't want to check by kasan. */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
+#endif /* _ASM_RISCV_STRING_H */
diff --git a/riscv/include/asm/suspend.h b/riscv/include/asm/suspend.h
new file mode 100644
index 0000000..02f8786
--- /dev/null
+++ b/riscv/include/asm/suspend.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _ASM_RISCV_SUSPEND_H
+#define _ASM_RISCV_SUSPEND_H
+
+#include <asm/ptrace.h>
+
+struct suspend_context {
+ /* Saved and restored by low-level functions */
+ struct pt_regs regs;
+ /* Saved and restored by high-level functions */
+ unsigned long scratch;
+ unsigned long tvec;
+ unsigned long ie;
+#ifdef CONFIG_MMU
+ unsigned long satp;
+#endif
+};
+
+/*
+ * Used by hibernation core and cleared during resume sequence
+ */
+extern int in_suspend;
+
+/* Low-level CPU suspend entry function */
+int __cpu_suspend_enter(struct suspend_context *context);
+
+/* High-level CPU suspend which will save context and call finish() */
+int cpu_suspend(unsigned long arg,
+ int (*finish)(unsigned long arg,
+ unsigned long entry,
+ unsigned long context));
+
+/* Low-level CPU resume entry function */
+int __cpu_resume_enter(unsigned long hartid, unsigned long context);
+
+/* Used to save and restore the CSRs */
+void suspend_save_csrs(struct suspend_context *context);
+void suspend_restore_csrs(struct suspend_context *context);
+
+/* Low-level API to support hibernation */
+int swsusp_arch_suspend(void);
+int swsusp_arch_resume(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+int __hibernate_cpu_resume(void);
+
+/* Used to resume on the CPU we hibernated on */
+int hibernate_resume_nonboot_cpu_disable(void);
+
+asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
+ unsigned long cpu_resume);
+asmlinkage int hibernate_core_restore_code(void);
+#endif
diff --git a/riscv/include/asm/switch_to.h b/riscv/include/asm/switch_to.h
new file mode 100644
index 0000000..f90d8e4
--- /dev/null
+++ b/riscv/include/asm/switch_to.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_SWITCH_TO_H
+#define _ASM_RISCV_SWITCH_TO_H
+
+#include <linux/jump_label.h>
+#include <linux/sched/task_stack.h>
+#include <asm/vector.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/csr.h>
+
+#ifdef CONFIG_FPU
+extern void __fstate_save(struct task_struct *save_to);
+extern void __fstate_restore(struct task_struct *restore_from);
+
+static inline void __fstate_clean(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_FS) | SR_FS_CLEAN;
+}
+
+static inline void fstate_off(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_FS) | SR_FS_OFF;
+}
+
+static inline void fstate_save(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_FS) == SR_FS_DIRTY) {
+ __fstate_save(task);
+ __fstate_clean(regs);
+ }
+}
+
+static inline void fstate_restore(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_FS) != SR_FS_OFF) {
+ __fstate_restore(task);
+ __fstate_clean(regs);
+ }
+}
+
+static inline void __switch_to_fpu(struct task_struct *prev,
+ struct task_struct *next)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(prev);
+ if (unlikely(regs->status & SR_SD))
+ fstate_save(prev, regs);
+ fstate_restore(next, task_pt_regs(next));
+}
+
+static __always_inline bool has_fpu(void)
+{
+ return riscv_has_extension_likely(RISCV_ISA_EXT_f) ||
+ riscv_has_extension_likely(RISCV_ISA_EXT_d);
+}
+#else
+static __always_inline bool has_fpu(void) { return false; }
+#define fstate_save(task, regs) do { } while (0)
+#define fstate_restore(task, regs) do { } while (0)
+#define __switch_to_fpu(__prev, __next) do { } while (0)
+#endif
+
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(prev, next, last) \
+do { \
+ struct task_struct *__prev = (prev); \
+ struct task_struct *__next = (next); \
+ if (has_fpu()) \
+ __switch_to_fpu(__prev, __next); \
+ if (has_vector()) \
+ __switch_to_vector(__prev, __next); \
+ ((last) = __switch_to(__prev, __next)); \
+} while (0)
+
+#endif /* _ASM_RISCV_SWITCH_TO_H */
diff --git a/riscv/include/asm/syscall.h b/riscv/include/asm/syscall.h
new file mode 100644
index 0000000..121fff4
--- /dev/null
+++ b/riscv/include/asm/syscall.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ * Copyright 2015 Regents of the University of California, Berkeley
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_RISCV_SYSCALL_H
+#define _ASM_RISCV_SYSCALL_H
+
+#include <asm/hwprobe.h>
+#include <uapi/linux/audit.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+
+/* The array of function pointers for syscalls. */
+extern void * const sys_call_table[];
+extern void * const compat_sys_call_table[];
+
+/*
+ * Only the low 32 bits of orig_r0 are meaningful, so we return int.
+ * This importantly ignores the high bits on 64-bit, so comparisons
+ * sign-extend the low 32 bits.
+ */
+static inline int syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->a7;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->a0 = regs->orig_a0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->a0;
+
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->a0 = (long) error ?: val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ args[0] = regs->orig_a0;
+ args++;
+ memcpy(args, &regs->a1, 5 * sizeof(args[0]));
+}
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+#ifdef CONFIG_64BIT
+ return AUDIT_ARCH_RISCV64;
+#else
+ return AUDIT_ARCH_RISCV32;
+#endif
+}
+
+typedef long (*syscall_t)(const struct pt_regs *);
+static inline void syscall_handler(struct pt_regs *regs, ulong syscall)
+{
+ syscall_t fn;
+
+#ifdef CONFIG_COMPAT
+ if ((regs->status & SR_UXL) == SR_UXL_32)
+ fn = compat_sys_call_table[syscall];
+ else
+#endif
+ fn = sys_call_table[syscall];
+
+ regs->a0 = fn(regs);
+}
+
+static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
+{
+ return false;
+}
+
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+
+asmlinkage long sys_riscv_hwprobe(struct riscv_hwprobe *, size_t, size_t,
+ unsigned long *, unsigned int);
+#endif /* _ASM_RISCV_SYSCALL_H */
diff --git a/riscv/include/asm/syscall_wrapper.h b/riscv/include/asm/syscall_wrapper.h
new file mode 100644
index 0000000..eeec04b
--- /dev/null
+++ b/riscv/include/asm/syscall_wrapper.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - riscv specific wrappers to syscall definitions
+ *
+ * Based on arch/arm64/include/syscall_wrapper.h
+ */
+
+#ifndef __ASM_SYSCALL_WRAPPER_H
+#define __ASM_SYSCALL_WRAPPER_H
+
+#include <asm/ptrace.h>
+
+asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
+
+#define SC_RISCV_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->orig_a0,,regs->a1,,regs->a2 \
+ ,,regs->a3,,regs->a4,,regs->a5,,regs->a6)
+
+#ifdef CONFIG_COMPAT
+
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__riscv_compat_sys##name, ERRNO); \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_compat_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
+ } \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define COMPAT_SYSCALL_DEFINE0(sname) \
+ asmlinkage long __riscv_compat_sys_##sname(const struct pt_regs *__unused); \
+ ALLOW_ERROR_INJECTION(__riscv_compat_sys_##sname, ERRNO); \
+ asmlinkage long __riscv_compat_sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL_COMPAT(name) \
+ asmlinkage long __weak __riscv_compat_sys_##name(const struct pt_regs *regs); \
+ asmlinkage long __weak __riscv_compat_sys_##name(const struct pt_regs *regs) \
+ { \
+ return sys_ni_syscall(); \
+ }
+
+#endif /* CONFIG_COMPAT */
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long __riscv_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__riscv_sys##name, ERRNO); \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __riscv_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __riscv_sys_##sname(const struct pt_regs *__unused); \
+ ALLOW_ERROR_INJECTION(__riscv_sys_##sname, ERRNO); \
+ asmlinkage long __riscv_sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL(name) \
+ asmlinkage long __weak __riscv_sys_##name(const struct pt_regs *regs); \
+ asmlinkage long __weak __riscv_sys_##name(const struct pt_regs *regs) \
+ { \
+ return sys_ni_syscall(); \
+ }
+
+#endif /* __ASM_SYSCALL_WRAPPER_H */
diff --git a/riscv/include/asm/thread_info.h b/riscv/include/asm/thread_info.h
new file mode 100644
index 0000000..f3301d8
--- /dev/null
+++ b/riscv/include/asm/thread_info.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_THREAD_INFO_H
+#define _ASM_RISCV_THREAD_INFO_H
+
+#include <asm/page.h>
+//#include <linux/const.h>
+
+/* thread information allocation */
+#define THREAD_SIZE_ORDER CONFIG_THREAD_SIZE_ORDER
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
+#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
+#define OVERFLOW_STACK_SIZE SZ_4K
+#define SHADOW_OVERFLOW_STACK_SIZE (1024)
+
+#define IRQ_STACK_SIZE THREAD_SIZE
+
+#ifndef __ASSEMBLY__
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - if the members of this struct changes, the assembly constants
+ * in asm-offsets.c must be updated accordingly
+ * - thread_info is included in task_struct at an offset of 0. This means that
+ * tp points to both thread_info and task_struct.
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0=>preemptible, <0=>BUG */
+ /*
+ * These stack pointers are overwritten on every system call or
+ * exception. SP is also saved to the stack it can be recovered when
+ * overwritten.
+ */
+ long kernel_sp; /* Kernel stack pointer */
+ long user_sp; /* User stack pointer */
+ int cpu;
+ unsigned long syscall_work; /* SYSCALL_WORK_ flags */
+#ifdef CONFIG_SHADOW_CALL_STACK
+ void *scs_base;
+ void *scs_sp;
+#endif
+};
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define INIT_SCS \
+ .scs_base = init_shadow_call_stack, \
+ .scs_sp = init_shadow_call_stack,
+#else
+#define INIT_SCS
+#endif
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ INIT_SCS \
+}
+
+void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in lowest half-word
+ * - other flags in upper half-word(s)
+ */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
+#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
+#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
+#define TIF_32BIT 11 /* compat-mode 32bit process */
+
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
+
+#define _TIF_WORK_MASK \
+ (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
+
+#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/riscv/include/asm/thread_info.h~ b/riscv/include/asm/thread_info.h~
new file mode 100644
index 0000000..5747799
--- /dev/null
+++ b/riscv/include/asm/thread_info.h~
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_THREAD_INFO_H
+#define _ASM_RISCV_THREAD_INFO_H
+
+#include <asm/page.h>
+#include <linux/const.h>
+
+/* thread information allocation */
+#define THREAD_SIZE_ORDER CONFIG_THREAD_SIZE_ORDER
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
+#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
+#define OVERFLOW_STACK_SIZE SZ_4K
+#define SHADOW_OVERFLOW_STACK_SIZE (1024)
+
+#define IRQ_STACK_SIZE THREAD_SIZE
+
+#ifndef __ASSEMBLY__
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - if the members of this struct changes, the assembly constants
+ * in asm-offsets.c must be updated accordingly
+ * - thread_info is included in task_struct at an offset of 0. This means that
+ * tp points to both thread_info and task_struct.
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0=>preemptible, <0=>BUG */
+ /*
+ * These stack pointers are overwritten on every system call or
+ * exception. SP is also saved to the stack it can be recovered when
+ * overwritten.
+ */
+ long kernel_sp; /* Kernel stack pointer */
+ long user_sp; /* User stack pointer */
+ int cpu;
+ unsigned long syscall_work; /* SYSCALL_WORK_ flags */
+#ifdef CONFIG_SHADOW_CALL_STACK
+ void *scs_base;
+ void *scs_sp;
+#endif
+};
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define INIT_SCS \
+ .scs_base = init_shadow_call_stack, \
+ .scs_sp = init_shadow_call_stack,
+#else
+#define INIT_SCS
+#endif
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ INIT_SCS \
+}
+
+void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in lowest half-word
+ * - other flags in upper half-word(s)
+ */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
+#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
+#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
+#define TIF_32BIT 11 /* compat-mode 32bit process */
+
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
+
+#define _TIF_WORK_MASK \
+ (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
+
+#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/riscv/include/asm/timex.h b/riscv/include/asm/timex.h
new file mode 100644
index 0000000..a066978
--- /dev/null
+++ b/riscv/include/asm/timex.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_TIMEX_H
+#define _ASM_RISCV_TIMEX_H
+
+#include <asm/csr.h>
+
+typedef unsigned long cycles_t;
+
+#ifdef CONFIG_RISCV_M_MODE
+
+#include <asm/clint.h>
+
+#ifdef CONFIG_64BIT
+static inline cycles_t get_cycles(void)
+{
+ return readq_relaxed(clint_time_val);
+}
+#else /* !CONFIG_64BIT */
+static inline u32 get_cycles(void)
+{
+ return readl_relaxed(((u32 *)clint_time_val));
+}
+#define get_cycles get_cycles
+
+static inline u32 get_cycles_hi(void)
+{
+ return readl_relaxed(((u32 *)clint_time_val) + 1);
+}
+#define get_cycles_hi get_cycles_hi
+#endif /* CONFIG_64BIT */
+
+/*
+ * Much like MIPS, we may not have a viable counter to use at an early point
+ * in the boot process. Unfortunately we don't have a fallback, so instead
+ * we just return 0.
+ */
+static inline unsigned long random_get_entropy(void)
+{
+ if (unlikely(clint_time_val == NULL))
+ return random_get_entropy_fallback();
+ return get_cycles();
+}
+#define random_get_entropy() random_get_entropy()
+
+#else /* CONFIG_RISCV_M_MODE */
+
+static inline cycles_t get_cycles(void)
+{
+ return csr_read(CSR_TIME);
+}
+#define get_cycles get_cycles
+
+static inline u32 get_cycles_hi(void)
+{
+ return csr_read(CSR_TIMEH);
+}
+#define get_cycles_hi get_cycles_hi
+
+#endif /* !CONFIG_RISCV_M_MODE */
+
+#ifdef CONFIG_64BIT
+static inline u64 get_cycles64(void)
+{
+ return get_cycles();
+}
+#else /* CONFIG_64BIT */
+static inline u64 get_cycles64(void)
+{
+ u32 hi, lo;
+
+ do {
+ hi = get_cycles_hi();
+ lo = get_cycles();
+ } while (hi != get_cycles_hi());
+
+ return ((u64)hi << 32) | lo;
+}
+#endif /* CONFIG_64BIT */
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+static inline int read_current_timer(unsigned long *timer_val)
+{
+ *timer_val = get_cycles();
+ return 0;
+}
+
+#endif /* _ASM_RISCV_TIMEX_H */
diff --git a/riscv/include/asm/tlb.h b/riscv/include/asm/tlb.h
new file mode 100644
index 0000000..50b63b5
--- /dev/null
+++ b/riscv/include/asm/tlb.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_TLB_H
+#define _ASM_RISCV_TLB_H
+
+struct mmu_gather;
+
+static void tlb_flush(struct mmu_gather *tlb);
+
+#define tlb_flush tlb_flush
+#include <asm-generic/tlb.h>
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+#ifdef CONFIG_MMU
+ if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
+ flush_tlb_mm(tlb->mm);
+ else
+ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
+ tlb_get_unmap_size(tlb));
+#endif
+}
+
+#endif /* _ASM_RISCV_TLB_H */
diff --git a/riscv/include/asm/tlbflush.h b/riscv/include/asm/tlbflush.h
new file mode 100644
index 0000000..51664ae
--- /dev/null
+++ b/riscv/include/asm/tlbflush.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_TLBFLUSH_H
+#define _ASM_RISCV_TLBFLUSH_H
+
+#include <linux/mm_types.h>
+#include <asm/smp.h>
+#include <asm/errata_list.h>
+
+#define FLUSH_TLB_MAX_SIZE ((unsigned long)-1)
+#define FLUSH_TLB_NO_ASID ((unsigned long)-1)
+
+#ifdef CONFIG_MMU
+extern unsigned long asid_mask;
+
+static inline void local_flush_tlb_all(void)
+{
+ __asm__ __volatile__ ("sfence.vma" : : : "memory");
+}
+
+/* Flush one page from local TLB */
+static inline void local_flush_tlb_page(unsigned long addr)
+{
+ ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
+}
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all() do { } while (0)
+#define local_flush_tlb_page(addr) do { } while (0)
+#endif /* CONFIG_MMU */
+
+#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, unsigned int page_size);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+#endif
+#else /* CONFIG_SMP && CONFIG_MMU */
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ local_flush_tlb_all();
+}
+
+/* Flush a range of kernel pages */
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ local_flush_tlb_all();
+}
+
+#define flush_tlb_mm(mm) flush_tlb_all()
+#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
+#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
+#endif /* !CONFIG_SMP || !CONFIG_MMU */
+
+#endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/riscv/include/asm/topology.h b/riscv/include/asm/topology.h
new file mode 100644
index 0000000..e316ab3
--- /dev/null
+++ b/riscv/include/asm/topology.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_TOPOLOGY_H
+#define _ASM_RISCV_TOPOLOGY_H
+
+#include <linux/arch_topology.h>
+
+/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_scale_freq_tick topology_scale_freq_tick
+#define arch_set_freq_scale topology_set_freq_scale
+#define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant
+
+/* Replace task scheduler's default cpu-invariant accounting */
+#define arch_scale_cpu_capacity topology_get_cpu_scale
+
+/* Enable topology flag updates */
+#define arch_update_cpu_topology topology_update_cpu_topology
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_RISCV_TOPOLOGY_H */
diff --git a/riscv/include/asm/uaccess.h b/riscv/include/asm/uaccess.h
new file mode 100644
index 0000000..ec0cab9
--- /dev/null
+++ b/riscv/include/asm/uaccess.h
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This file was copied from include/asm-generic/uaccess.h
+ */
+
+#ifndef _ASM_RISCV_UACCESS_H
+#define _ASM_RISCV_UACCESS_H
+
+#include <asm/asm-extable.h>
+#include <asm/pgtable.h> /* for TASK_SIZE */
+
+/*
+ * User space memory access functions
+ */
+#ifdef CONFIG_MMU
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <asm/byteorder.h>
+#include <asm/extable.h>
+#include <asm/asm.h>
+#include <asm-generic/access_ok.h>
+
+#define __enable_user_access() \
+ __asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
+#define __disable_user_access() \
+ __asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+#define __LSW 0
+#define __MSW 1
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the address
+ * space - it must have been done previously with a separate "access_ok()"
+ * call.
+ */
+
+#define __get_user_asm(insn, x, ptr, err) \
+do { \
+ __typeof__(x) __x; \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " " insn " %1, %2\n" \
+ "2:\n" \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1) \
+ : "+r" (err), "=&r" (__x) \
+ : "m" (*(ptr))); \
+ (x) = __x; \
+} while (0)
+
+#ifdef CONFIG_64BIT
+#define __get_user_8(x, ptr, err) \
+ __get_user_asm("ld", x, ptr, err)
+#else /* !CONFIG_64BIT */
+#define __get_user_8(x, ptr, err) \
+do { \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ u32 __lo, __hi; \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " lw %1, %3\n" \
+ "2:\n" \
+ " lw %2, %4\n" \
+ "3:\n" \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \
+ : "+r" (err), "=&r" (__lo), "=r" (__hi) \
+ : "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \
+ if (err) \
+ __hi = 0; \
+ (x) = (__typeof__(x))((__typeof__((x)-(x)))( \
+ (((u64)__hi << 32) | __lo))); \
+} while (0)
+#endif /* CONFIG_64BIT */
+
+#define __get_user_nocheck(x, __gu_ptr, __gu_err) \
+do { \
+ switch (sizeof(*__gu_ptr)) { \
+ case 1: \
+ __get_user_asm("lb", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 2: \
+ __get_user_asm("lh", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 4: \
+ __get_user_asm("lw", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 8: \
+ __get_user_8((x), __gu_ptr, __gu_err); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+} while (0)
+
+/**
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ long __gu_err = 0; \
+ \
+ __chk_user_ptr(__gu_ptr); \
+ \
+ __enable_user_access(); \
+ __get_user_nocheck(x, __gu_ptr, __gu_err); \
+ __disable_user_access(); \
+ \
+ __gu_err; \
+})
+
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? \
+ __get_user((x), __p) : \
+ ((x) = (__force __typeof__(x))0, -EFAULT); \
+})
+
+#define __put_user_asm(insn, x, ptr, err) \
+do { \
+ __typeof__(*(ptr)) __x = x; \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " " insn " %z2, %1\n" \
+ "2:\n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0) \
+ : "+r" (err), "=m" (*(ptr)) \
+ : "rJ" (__x)); \
+} while (0)
+
+#ifdef CONFIG_64BIT
+#define __put_user_8(x, ptr, err) \
+ __put_user_asm("sd", x, ptr, err)
+#else /* !CONFIG_64BIT */
+#define __put_user_8(x, ptr, err) \
+do { \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ u64 __x = (__typeof__((x)-(x)))(x); \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " sw %z3, %1\n" \
+ "2:\n" \
+ " sw %z4, %2\n" \
+ "3:\n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \
+ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \
+ : "+r" (err), \
+ "=m" (__ptr[__LSW]), \
+ "=m" (__ptr[__MSW]) \
+ : "rJ" (__x), "rJ" (__x >> 32)); \
+} while (0)
+#endif /* CONFIG_64BIT */
+
+#define __put_user_nocheck(x, __gu_ptr, __pu_err) \
+do { \
+ switch (sizeof(*__gu_ptr)) { \
+ case 1: \
+ __put_user_asm("sb", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 2: \
+ __put_user_asm("sh", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 4: \
+ __put_user_asm("sw", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 8: \
+ __put_user_8((x), __gu_ptr, __pu_err); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+} while (0)
+
+/**
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr. The value of @x is copied to avoid
+ * re-ordering where @x is evaluated inside the block that enables user-space
+ * access (thus bypassing user space protection if @x is a function).
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __typeof__(*__gu_ptr) __val = (x); \
+ long __pu_err = 0; \
+ \
+ __chk_user_ptr(__gu_ptr); \
+ \
+ __enable_user_access(); \
+ __put_user_nocheck(__val, __gu_ptr, __pu_err); \
+ __disable_user_access(); \
+ \
+ __pu_err; \
+})
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? \
+ __put_user((x), __p) : \
+ -EFAULT; \
+})
+
+
+unsigned long __must_check __asm_copy_to_user(void __user *to,
+ const void *from, unsigned long n);
+unsigned long __must_check __asm_copy_from_user(void *to,
+ const void __user *from, unsigned long n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return __asm_copy_from_user(to, from, n);
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return __asm_copy_to_user(to, from, n);
+}
+
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern long __must_check strnlen_user(const char __user *str, long n);
+
+extern
+unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+
+static inline
+unsigned long __must_check clear_user(void __user *to, unsigned long n)
+{
+ might_fault();
+ return access_ok(to, n) ?
+ __clear_user(to, n) : n;
+}
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ long __kr_err; \
+ \
+ __get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
+ if (unlikely(__kr_err)) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ long __kr_err; \
+ \
+ __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
+ if (unlikely(__kr_err)) \
+ goto err_label; \
+} while (0)
+
+#else /* CONFIG_MMU */
+#include <asm-generic/uaccess.h>
+#endif /* CONFIG_MMU */
+#endif /* _ASM_RISCV_UACCESS_H */
diff --git a/riscv/include/asm/unistd.h b/riscv/include/asm/unistd.h
new file mode 100644
index 0000000..221630b
--- /dev/null
+++ b/riscv/include/asm/unistd.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+/*
+ * There is explicitly no include guard here because this file is expected to
+ * be included multiple times.
+ */
+
+#define __ARCH_WANT_SYS_CLONE
+
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_COMPAT_TRUNCATE64
+#define __ARCH_WANT_COMPAT_FTRUNCATE64
+#define __ARCH_WANT_COMPAT_FALLOCATE
+#define __ARCH_WANT_COMPAT_PREAD64
+#define __ARCH_WANT_COMPAT_PWRITE64
+#define __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
+#define __ARCH_WANT_COMPAT_READAHEAD
+#define __ARCH_WANT_COMPAT_FADVISE64_64
+#endif
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/riscv/include/asm/uprobes.h b/riscv/include/asm/uprobes.h
new file mode 100644
index 0000000..3fc7ded
--- /dev/null
+++ b/riscv/include/asm/uprobes.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_RISCV_UPROBES_H
+#define _ASM_RISCV_UPROBES_H
+
+#include <asm/probes.h>
+#include <asm/patch.h>
+#include <asm/bug.h>
+
+#define MAX_UINSN_BYTES 8
+
+#ifdef CONFIG_RISCV_ISA_C
+#define UPROBE_SWBP_INSN __BUG_INSN_16
+#define UPROBE_SWBP_INSN_SIZE 2
+#else
+#define UPROBE_SWBP_INSN __BUG_INSN_32
+#define UPROBE_SWBP_INSN_SIZE 4
+#endif
+#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
+
+typedef u32 uprobe_opcode_t;
+
+struct arch_uprobe_task {
+ unsigned long saved_cause;
+};
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u8 ixol[MAX_UINSN_BYTES];
+ };
+ struct arch_probe_insn api;
+ unsigned long insn_size;
+ bool simulate;
+};
+
+#ifdef CONFIG_UPROBES
+bool uprobe_breakpoint_handler(struct pt_regs *regs);
+bool uprobe_single_step_handler(struct pt_regs *regs);
+#else
+static inline bool uprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline bool uprobe_single_step_handler(struct pt_regs *regs)
+{
+ return false;
+}
+#endif /* CONFIG_UPROBES */
+#endif /* _ASM_RISCV_UPROBES_H */
diff --git a/riscv/include/asm/vdso.h b/riscv/include/asm/vdso.h
new file mode 100644
index 0000000..f891478
--- /dev/null
+++ b/riscv/include/asm/vdso.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 ARM Limited
+ * Copyright (C) 2014 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_VDSO_H
+#define _ASM_RISCV_VDSO_H
+
+/*
+ * All systems with an MMU have a VDSO, but systems without an MMU don't
+ * support shared libraries and therefore don't have one.
+ */
+#ifdef CONFIG_MMU
+
+#define __VVAR_PAGES 2
+
+#ifndef __ASSEMBLY__
+#include <generated/vdso-offsets.h>
+
+#define VDSO_SYMBOL(base, name) \
+ (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
+
+#ifdef CONFIG_COMPAT
+#include <generated/compat_vdso-offsets.h>
+
+#define COMPAT_VDSO_SYMBOL(base, name) \
+ (void __user *)((unsigned long)(base) + compat__vdso_##name##_offset)
+
+extern char compat_vdso_start[], compat_vdso_end[];
+
+#endif /* CONFIG_COMPAT */
+
+extern char vdso_start[], vdso_end[];
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_MMU */
+
+#endif /* _ASM_RISCV_VDSO_H */
diff --git a/riscv/include/asm/vdso/clocksource.h b/riscv/include/asm/vdso/clocksource.h
new file mode 100644
index 0000000..df6ea65
--- /dev/null
+++ b/riscv/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif
diff --git a/riscv/include/asm/vdso/data.h b/riscv/include/asm/vdso/data.h
new file mode 100644
index 0000000..dc2f76f
--- /dev/null
+++ b/riscv/include/asm/vdso/data.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __RISCV_ASM_VDSO_DATA_H
+#define __RISCV_ASM_VDSO_DATA_H
+
+#include <linux/types.h>
+#include <vdso/datapage.h>
+#include <asm/hwprobe.h>
+
+struct arch_vdso_data {
+ /* Stash static answers to the hwprobe queries when all CPUs are selected. */
+ __u64 all_cpu_hwprobe_values[RISCV_HWPROBE_MAX_KEY + 1];
+
+ /* Boolean indicating all CPUs have the same static hwprobe values. */
+ __u8 homogeneous_cpus;
+};
+
+#endif /* __RISCV_ASM_VDSO_DATA_H */
diff --git a/riscv/include/asm/vdso/gettimeofday.h b/riscv/include/asm/vdso/gettimeofday.h
new file mode 100644
index 0000000..ba3283c
--- /dev/null
+++ b/riscv/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+#include <asm/unistd.h>
+#include <asm/csr.h>
+#include <uapi/linux/time.h>
+
+/*
+ * 32-bit land is lacking generic time vsyscalls as well as the legacy 32-bit
+ * time syscalls like gettimeofday. Skip these definitions since on 32-bit.
+ */
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register struct timezone *tz asm("a1") = _tz;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_gettimeofday;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(tv), "r"(tz), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_gettime;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_getres;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+#endif /* CONFIG_GENERIC_TIME_VSYSCALL */
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+ const struct vdso_data *vd)
+{
+ /*
+ * The purpose of csr_read(CSR_TIME) is to trap the system into
+ * M-mode to obtain the value of CSR_TIME. Hence, unlike other
+ * architecture, no fence instructions surround the csr_read()
+ */
+ return csr_read(CSR_TIME);
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return _vdso_data;
+}
+
+#ifdef CONFIG_TIME_NS
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
+{
+ return _timens_data;
+}
+#endif
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/riscv/include/asm/vdso/processor.h b/riscv/include/asm/vdso/processor.h
new file mode 100644
index 0000000..96b65a5
--- /dev/null
+++ b/riscv/include/asm/vdso/processor.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+
+static inline void cpu_relax(void)
+{
+#ifdef __riscv_muldiv
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+#endif
+
+#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
+ /*
+ * Reduce instruction retirement.
+ * This assumes the PC changes.
+ */
+ __asm__ __volatile__ ("pause");
+#else
+ /* Encoding of the pause instruction */
+ __asm__ __volatile__ (".4byte 0x100000F");
+#endif
+ barrier();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/riscv/include/asm/vdso/vsyscall.h b/riscv/include/asm/vdso/vsyscall.h
new file mode 100644
index 0000000..82fd5d8
--- /dev/null
+++ b/riscv/include/asm/vdso/vsyscall.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+
+#define __arch_get_k_vdso_data __riscv_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/riscv/include/asm/vector.h b/riscv/include/asm/vector.h
new file mode 100644
index 0000000..87aaef6
--- /dev/null
+++ b/riscv/include/asm/vector.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef __ASM_RISCV_VECTOR_H
+#define __ASM_RISCV_VECTOR_H
+
+#include <linux/types.h>
+#include <uapi/asm-generic/errno.h>
+
+#ifdef CONFIG_RISCV_ISA_V
+
+#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <asm/ptrace.h>
+#include <asm/cpufeature.h>
+#include <asm/csr.h>
+#include <asm/asm.h>
+
+extern unsigned long riscv_v_vsize;
+int riscv_v_setup_vsize(void);
+bool riscv_v_first_use_handler(struct pt_regs *regs);
+
+static __always_inline bool has_vector(void)
+{
+ return riscv_has_extension_unlikely(RISCV_ISA_EXT_v);
+}
+
+static inline void __riscv_v_vstate_clean(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_VS) | SR_VS_CLEAN;
+}
+
+static inline void __riscv_v_vstate_dirty(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_VS) | SR_VS_DIRTY;
+}
+
+static inline void riscv_v_vstate_off(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_VS) | SR_VS_OFF;
+}
+
+static inline void riscv_v_vstate_on(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_VS) | SR_VS_INITIAL;
+}
+
+static inline bool riscv_v_vstate_query(struct pt_regs *regs)
+{
+ return (regs->status & SR_VS) != 0;
+}
+
+static __always_inline void riscv_v_enable(void)
+{
+ csr_set(CSR_SSTATUS, SR_VS);
+}
+
+static __always_inline void riscv_v_disable(void)
+{
+ csr_clear(CSR_SSTATUS, SR_VS);
+}
+
+static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
+{
+ asm volatile (
+ "csrr %0, " __stringify(CSR_VSTART) "\n\t"
+ "csrr %1, " __stringify(CSR_VTYPE) "\n\t"
+ "csrr %2, " __stringify(CSR_VL) "\n\t"
+ "csrr %3, " __stringify(CSR_VCSR) "\n\t"
+ "csrr %4, " __stringify(CSR_VLENB) "\n\t"
+ : "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl),
+ "=r" (dest->vcsr), "=r" (dest->vlenb) : :);
+}
+
+static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src)
+{
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +v\n\t"
+ "vsetvl x0, %2, %1\n\t"
+ ".option pop\n\t"
+ "csrw " __stringify(CSR_VSTART) ", %0\n\t"
+ "csrw " __stringify(CSR_VCSR) ", %3\n\t"
+ : : "r" (src->vstart), "r" (src->vtype), "r" (src->vl),
+ "r" (src->vcsr) :);
+}
+
+static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
+ void *datap)
+{
+ unsigned long vl;
+
+ riscv_v_enable();
+ __vstate_csr_save(save_to);
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +v\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vse8.v v0, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v8, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v16, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v24, (%1)\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (datap) : "memory");
+ riscv_v_disable();
+}
+
+static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_from,
+ void *datap)
+{
+ unsigned long vl;
+
+ riscv_v_enable();
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +v\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vle8.v v0, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v8, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v16, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v24, (%1)\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (datap) : "memory");
+ __vstate_csr_restore(restore_from);
+ riscv_v_disable();
+}
+
+static inline void __riscv_v_vstate_discard(void)
+{
+ unsigned long vl, vtype_inval = 1UL << (BITS_PER_LONG - 1);
+
+ riscv_v_enable();
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +v\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vmv.v.i v0, -1\n\t"
+ "vmv.v.i v8, -1\n\t"
+ "vmv.v.i v16, -1\n\t"
+ "vmv.v.i v24, -1\n\t"
+ "vsetvl %0, x0, %1\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (vtype_inval) : "memory");
+ riscv_v_disable();
+}
+
+static inline void riscv_v_vstate_discard(struct pt_regs *regs)
+{
+ if ((regs->status & SR_VS) == SR_VS_OFF)
+ return;
+
+ __riscv_v_vstate_discard();
+ __riscv_v_vstate_dirty(regs);
+}
+
+static inline void riscv_v_vstate_save(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_VS) == SR_VS_DIRTY) {
+ struct __riscv_v_ext_state *vstate = &task->thread.vstate;
+
+ __riscv_v_vstate_save(vstate, vstate->datap);
+ __riscv_v_vstate_clean(regs);
+ }
+}
+
+static inline void riscv_v_vstate_restore(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_VS) != SR_VS_OFF) {
+ struct __riscv_v_ext_state *vstate = &task->thread.vstate;
+
+ __riscv_v_vstate_restore(vstate, vstate->datap);
+ __riscv_v_vstate_clean(regs);
+ }
+}
+
+static inline void __switch_to_vector(struct task_struct *prev,
+ struct task_struct *next)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(prev);
+ riscv_v_vstate_save(prev, regs);
+ riscv_v_vstate_restore(next, task_pt_regs(next));
+}
+
+void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
+bool riscv_v_vstate_ctrl_user_allowed(void);
+
+#else /* ! CONFIG_RISCV_ISA_V */
+
+struct pt_regs;
+
+static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
+static __always_inline bool has_vector(void) { return false; }
+static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
+static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
+static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
+#define riscv_v_vsize (0)
+#define riscv_v_vstate_discard(regs) do {} while (0)
+#define riscv_v_vstate_save(task, regs) do {} while (0)
+#define riscv_v_vstate_restore(task, regs) do {} while (0)
+#define __switch_to_vector(__prev, __next) do {} while (0)
+#define riscv_v_vstate_off(regs) do {} while (0)
+#define riscv_v_vstate_on(regs) do {} while (0)
+
+#endif /* CONFIG_RISCV_ISA_V */
+
+#endif /* ! __ASM_RISCV_VECTOR_H */
diff --git a/riscv/include/asm/vendorid_list.h b/riscv/include/asm/vendorid_list.h
new file mode 100644
index 0000000..e55407a
--- /dev/null
+++ b/riscv/include/asm/vendorid_list.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+#ifndef ASM_VENDOR_LIST_H
+#define ASM_VENDOR_LIST_H
+
+#define ANDESTECH_VENDOR_ID 0x31e
+#define SIFIVE_VENDOR_ID 0x489
+#define THEAD_VENDOR_ID 0x5b7
+
+#endif
diff --git a/riscv/include/asm/vermagic.h b/riscv/include/asm/vermagic.h
new file mode 100644
index 0000000..7b9441a
--- /dev/null
+++ b/riscv/include/asm/vermagic.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#define MODULE_ARCH_VERMAGIC "riscv"
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/riscv/include/asm/vmalloc.h b/riscv/include/asm/vmalloc.h
new file mode 100644
index 0000000..924d01b
--- /dev/null
+++ b/riscv/include/asm/vmalloc.h
@@ -0,0 +1,83 @@
+#ifndef _ASM_RISCV_VMALLOC_H
+#define _ASM_RISCV_VMALLOC_H
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+extern bool pgtable_l4_enabled, pgtable_l5_enabled;
+
+#define IOREMAP_MAX_ORDER (PUD_SHIFT)
+
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+ return pgtable_l4_enabled || pgtable_l5_enabled;
+}
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+ return true;
+}
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#include <linux/pgtable.h>
+
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+ unsigned long map_size = PAGE_SIZE;
+ unsigned long size, order;
+
+ if (!has_svnapot())
+ return map_size;
+
+ for_each_napot_order_rev(order) {
+ if (napot_cont_shift(order) > max_page_shift)
+ continue;
+
+ size = napot_cont_size(order);
+ if (end - addr < size)
+ continue;
+
+ if (!IS_ALIGNED(addr, size))
+ continue;
+
+ if (!IS_ALIGNED(PFN_PHYS(pfn), size))
+ continue;
+
+ map_size = size;
+ break;
+ }
+
+ return map_size;
+}
+
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ int shift = PAGE_SHIFT;
+ unsigned long order;
+
+ if (!has_svnapot())
+ return shift;
+
+ WARN_ON_ONCE(size >= PMD_SIZE);
+
+ for_each_napot_order_rev(order) {
+ if (napot_cont_size(order) > size)
+ continue;
+
+ if (!IS_ALIGNED(size, napot_cont_size(order)))
+ continue;
+
+ shift = napot_cont_shift(order);
+ break;
+ }
+
+ return shift;
+}
+
+#endif /* CONFIG_RISCV_ISA_SVNAPOT */
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+#endif /* _ASM_RISCV_VMALLOC_H */
diff --git a/riscv/include/asm/word-at-a-time.h b/riscv/include/asm/word-at-a-time.h
new file mode 100644
index 0000000..7c086ac
--- /dev/null
+++ b/riscv/include/asm/word-at-a-time.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * Derived from arch/x86/include/asm/word-at-a-time.h
+ */
+
+#ifndef _ASM_RISCV_WORD_AT_A_TIME_H
+#define _ASM_RISCV_WORD_AT_A_TIME_H
+
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long val,
+ unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((val - c->one_bits) & ~val) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long val,
+ unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return fls64(mask) >> 3;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif /* _ASM_RISCV_WORD_AT_A_TIME_H */
diff --git a/riscv/include/asm/xip_fixup.h b/riscv/include/asm/xip_fixup.h
new file mode 100644
index 0000000..f3c94cd
--- /dev/null
+++ b/riscv/include/asm/xip_fixup.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * XIP fixup macros, only useful in assembly.
+ */
+#ifndef _ASM_RISCV_XIP_FIXUP_H
+#define _ASM_RISCV_XIP_FIXUP_H
+
+//#include <linux/pgtable.h>
+
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+ REG_L t0, _xip_fixup
+ add \reg, \reg, t0
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+ la t0, __data_loc
+ REG_L t1, _xip_phys_offset
+ sub \reg, \reg, t1
+ add \reg, \reg, t0
+.endm
+
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
+#endif
diff --git a/riscv/include/asm/xip_fixup.h~ b/riscv/include/asm/xip_fixup.h~
new file mode 100644
index 0000000..b65bf63
--- /dev/null
+++ b/riscv/include/asm/xip_fixup.h~
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * XIP fixup macros, only useful in assembly.
+ */
+#ifndef _ASM_RISCV_XIP_FIXUP_H
+#define _ASM_RISCV_XIP_FIXUP_H
+
+#include <linux/pgtable.h>
+
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+ REG_L t0, _xip_fixup
+ add \reg, \reg, t0
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+ la t0, __data_loc
+ REG_L t1, _xip_phys_offset
+ sub \reg, \reg, t1
+ add \reg, \reg, t0
+.endm
+
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
+#endif
diff --git a/riscv/include/generated/.compat_vdso-offsets.h.cmd b/riscv/include/generated/.compat_vdso-offsets.h.cmd
new file mode 100644
index 0000000..7b7eac3
--- /dev/null
+++ b/riscv/include/generated/.compat_vdso-offsets.h.cmd
@@ -0,0 +1 @@
+savedcmd_include/generated/compat_vdso-offsets.h := riscv64-unknown-elf-nm arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg | ./arch/riscv/kernel/compat_vdso/gen_compat_vdso_offsets.sh | LC_ALL=C sort > include/generated/compat_vdso-offsets.h
diff --git a/riscv/include/generated/.vdso-offsets.h.cmd b/riscv/include/generated/.vdso-offsets.h.cmd
new file mode 100644
index 0000000..46b66b9
--- /dev/null
+++ b/riscv/include/generated/.vdso-offsets.h.cmd
@@ -0,0 +1 @@
+savedcmd_include/generated/vdso-offsets.h := riscv64-unknown-elf-nm arch/riscv/kernel/vdso/vdso.so.dbg | ./arch/riscv/kernel/vdso/gen_vdso_offsets.sh | LC_ALL=C sort > include/generated/vdso-offsets.h
diff --git a/riscv/include/generated/asm-offsets.h b/riscv/include/generated/asm-offsets.h
new file mode 100644
index 0000000..3fd2fe6
--- /dev/null
+++ b/riscv/include/generated/asm-offsets.h
@@ -0,0 +1,304 @@
+#ifndef __ASM_OFFSETS_H__
+#define __ASM_OFFSETS_H__
+/*
+ * DO NOT MODIFY.
+ *
+ * This file was generated by Kbuild
+ */
+
+#define TASK_THREAD_RA 2576 /* offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_SP 2584 /* offsetof(struct task_struct, thread.sp) */
+#define TASK_THREAD_S0 2592 /* offsetof(struct task_struct, thread.s[0]) */
+#define TASK_THREAD_S1 2600 /* offsetof(struct task_struct, thread.s[1]) */
+#define TASK_THREAD_S2 2608 /* offsetof(struct task_struct, thread.s[2]) */
+#define TASK_THREAD_S3 2616 /* offsetof(struct task_struct, thread.s[3]) */
+#define TASK_THREAD_S4 2624 /* offsetof(struct task_struct, thread.s[4]) */
+#define TASK_THREAD_S5 2632 /* offsetof(struct task_struct, thread.s[5]) */
+#define TASK_THREAD_S6 2640 /* offsetof(struct task_struct, thread.s[6]) */
+#define TASK_THREAD_S7 2648 /* offsetof(struct task_struct, thread.s[7]) */
+#define TASK_THREAD_S8 2656 /* offsetof(struct task_struct, thread.s[8]) */
+#define TASK_THREAD_S9 2664 /* offsetof(struct task_struct, thread.s[9]) */
+#define TASK_THREAD_S10 2672 /* offsetof(struct task_struct, thread.s[10]) */
+#define TASK_THREAD_S11 2680 /* offsetof(struct task_struct, thread.s[11]) */
+#define TASK_TI_FLAGS 0 /* offsetof(struct task_struct, thread_info.flags) */
+#define TASK_TI_PREEMPT_COUNT 8 /* offsetof(struct task_struct, thread_info.preempt_count) */
+#define TASK_TI_KERNEL_SP 16 /* offsetof(struct task_struct, thread_info.kernel_sp) */
+#define TASK_TI_USER_SP 24 /* offsetof(struct task_struct, thread_info.user_sp) */
+#define TASK_TI_CPU_NUM 32 /* offsetof(struct task_struct, thread_info.cpu) */
+#define TASK_THREAD_F0 2688 /* offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F1 2696 /* offsetof(struct task_struct, thread.fstate.f[1]) */
+#define TASK_THREAD_F2 2704 /* offsetof(struct task_struct, thread.fstate.f[2]) */
+#define TASK_THREAD_F3 2712 /* offsetof(struct task_struct, thread.fstate.f[3]) */
+#define TASK_THREAD_F4 2720 /* offsetof(struct task_struct, thread.fstate.f[4]) */
+#define TASK_THREAD_F5 2728 /* offsetof(struct task_struct, thread.fstate.f[5]) */
+#define TASK_THREAD_F6 2736 /* offsetof(struct task_struct, thread.fstate.f[6]) */
+#define TASK_THREAD_F7 2744 /* offsetof(struct task_struct, thread.fstate.f[7]) */
+#define TASK_THREAD_F8 2752 /* offsetof(struct task_struct, thread.fstate.f[8]) */
+#define TASK_THREAD_F9 2760 /* offsetof(struct task_struct, thread.fstate.f[9]) */
+#define TASK_THREAD_F10 2768 /* offsetof(struct task_struct, thread.fstate.f[10]) */
+#define TASK_THREAD_F11 2776 /* offsetof(struct task_struct, thread.fstate.f[11]) */
+#define TASK_THREAD_F12 2784 /* offsetof(struct task_struct, thread.fstate.f[12]) */
+#define TASK_THREAD_F13 2792 /* offsetof(struct task_struct, thread.fstate.f[13]) */
+#define TASK_THREAD_F14 2800 /* offsetof(struct task_struct, thread.fstate.f[14]) */
+#define TASK_THREAD_F15 2808 /* offsetof(struct task_struct, thread.fstate.f[15]) */
+#define TASK_THREAD_F16 2816 /* offsetof(struct task_struct, thread.fstate.f[16]) */
+#define TASK_THREAD_F17 2824 /* offsetof(struct task_struct, thread.fstate.f[17]) */
+#define TASK_THREAD_F18 2832 /* offsetof(struct task_struct, thread.fstate.f[18]) */
+#define TASK_THREAD_F19 2840 /* offsetof(struct task_struct, thread.fstate.f[19]) */
+#define TASK_THREAD_F20 2848 /* offsetof(struct task_struct, thread.fstate.f[20]) */
+#define TASK_THREAD_F21 2856 /* offsetof(struct task_struct, thread.fstate.f[21]) */
+#define TASK_THREAD_F22 2864 /* offsetof(struct task_struct, thread.fstate.f[22]) */
+#define TASK_THREAD_F23 2872 /* offsetof(struct task_struct, thread.fstate.f[23]) */
+#define TASK_THREAD_F24 2880 /* offsetof(struct task_struct, thread.fstate.f[24]) */
+#define TASK_THREAD_F25 2888 /* offsetof(struct task_struct, thread.fstate.f[25]) */
+#define TASK_THREAD_F26 2896 /* offsetof(struct task_struct, thread.fstate.f[26]) */
+#define TASK_THREAD_F27 2904 /* offsetof(struct task_struct, thread.fstate.f[27]) */
+#define TASK_THREAD_F28 2912 /* offsetof(struct task_struct, thread.fstate.f[28]) */
+#define TASK_THREAD_F29 2920 /* offsetof(struct task_struct, thread.fstate.f[29]) */
+#define TASK_THREAD_F30 2928 /* offsetof(struct task_struct, thread.fstate.f[30]) */
+#define TASK_THREAD_F31 2936 /* offsetof(struct task_struct, thread.fstate.f[31]) */
+#define TASK_THREAD_FCSR 2944 /* offsetof(struct task_struct, thread.fstate.fcsr) */
+#define TSK_STACK_CANARY 1088 /* offsetof(struct task_struct, stack_canary) */
+#define PT_SIZE 288 /* sizeof(struct pt_regs) */
+#define PT_EPC 0 /* offsetof(struct pt_regs, epc) */
+#define PT_RA 8 /* offsetof(struct pt_regs, ra) */
+#define PT_FP 64 /* offsetof(struct pt_regs, s0) */
+#define PT_S0 64 /* offsetof(struct pt_regs, s0) */
+#define PT_S1 72 /* offsetof(struct pt_regs, s1) */
+#define PT_S2 144 /* offsetof(struct pt_regs, s2) */
+#define PT_S3 152 /* offsetof(struct pt_regs, s3) */
+#define PT_S4 160 /* offsetof(struct pt_regs, s4) */
+#define PT_S5 168 /* offsetof(struct pt_regs, s5) */
+#define PT_S6 176 /* offsetof(struct pt_regs, s6) */
+#define PT_S7 184 /* offsetof(struct pt_regs, s7) */
+#define PT_S8 192 /* offsetof(struct pt_regs, s8) */
+#define PT_S9 200 /* offsetof(struct pt_regs, s9) */
+#define PT_S10 208 /* offsetof(struct pt_regs, s10) */
+#define PT_S11 216 /* offsetof(struct pt_regs, s11) */
+#define PT_SP 16 /* offsetof(struct pt_regs, sp) */
+#define PT_TP 32 /* offsetof(struct pt_regs, tp) */
+#define PT_A0 80 /* offsetof(struct pt_regs, a0) */
+#define PT_A1 88 /* offsetof(struct pt_regs, a1) */
+#define PT_A2 96 /* offsetof(struct pt_regs, a2) */
+#define PT_A3 104 /* offsetof(struct pt_regs, a3) */
+#define PT_A4 112 /* offsetof(struct pt_regs, a4) */
+#define PT_A5 120 /* offsetof(struct pt_regs, a5) */
+#define PT_A6 128 /* offsetof(struct pt_regs, a6) */
+#define PT_A7 136 /* offsetof(struct pt_regs, a7) */
+#define PT_T0 40 /* offsetof(struct pt_regs, t0) */
+#define PT_T1 48 /* offsetof(struct pt_regs, t1) */
+#define PT_T2 56 /* offsetof(struct pt_regs, t2) */
+#define PT_T3 224 /* offsetof(struct pt_regs, t3) */
+#define PT_T4 232 /* offsetof(struct pt_regs, t4) */
+#define PT_T5 240 /* offsetof(struct pt_regs, t5) */
+#define PT_T6 248 /* offsetof(struct pt_regs, t6) */
+#define PT_GP 24 /* offsetof(struct pt_regs, gp) */
+#define PT_ORIG_A0 280 /* offsetof(struct pt_regs, orig_a0) */
+#define PT_STATUS 256 /* offsetof(struct pt_regs, status) */
+#define PT_BADADDR 264 /* offsetof(struct pt_regs, badaddr) */
+#define PT_CAUSE 272 /* offsetof(struct pt_regs, cause) */
+#define SUSPEND_CONTEXT_REGS 0 /* offsetof(struct suspend_context, regs) */
+#define HIBERN_PBE_ADDR 0 /* offsetof(struct pbe, address) */
+#define HIBERN_PBE_ORIG 8 /* offsetof(struct pbe, orig_address) */
+#define HIBERN_PBE_NEXT 16 /* offsetof(struct pbe, next) */
+#define KVM_ARCH_GUEST_ZERO 944 /* offsetof(struct kvm_vcpu_arch, guest_context.zero) */
+#define KVM_ARCH_GUEST_RA 952 /* offsetof(struct kvm_vcpu_arch, guest_context.ra) */
+#define KVM_ARCH_GUEST_SP 960 /* offsetof(struct kvm_vcpu_arch, guest_context.sp) */
+#define KVM_ARCH_GUEST_GP 968 /* offsetof(struct kvm_vcpu_arch, guest_context.gp) */
+#define KVM_ARCH_GUEST_TP 976 /* offsetof(struct kvm_vcpu_arch, guest_context.tp) */
+#define KVM_ARCH_GUEST_T0 984 /* offsetof(struct kvm_vcpu_arch, guest_context.t0) */
+#define KVM_ARCH_GUEST_T1 992 /* offsetof(struct kvm_vcpu_arch, guest_context.t1) */
+#define KVM_ARCH_GUEST_T2 1000 /* offsetof(struct kvm_vcpu_arch, guest_context.t2) */
+#define KVM_ARCH_GUEST_S0 1008 /* offsetof(struct kvm_vcpu_arch, guest_context.s0) */
+#define KVM_ARCH_GUEST_S1 1016 /* offsetof(struct kvm_vcpu_arch, guest_context.s1) */
+#define KVM_ARCH_GUEST_A0 1024 /* offsetof(struct kvm_vcpu_arch, guest_context.a0) */
+#define KVM_ARCH_GUEST_A1 1032 /* offsetof(struct kvm_vcpu_arch, guest_context.a1) */
+#define KVM_ARCH_GUEST_A2 1040 /* offsetof(struct kvm_vcpu_arch, guest_context.a2) */
+#define KVM_ARCH_GUEST_A3 1048 /* offsetof(struct kvm_vcpu_arch, guest_context.a3) */
+#define KVM_ARCH_GUEST_A4 1056 /* offsetof(struct kvm_vcpu_arch, guest_context.a4) */
+#define KVM_ARCH_GUEST_A5 1064 /* offsetof(struct kvm_vcpu_arch, guest_context.a5) */
+#define KVM_ARCH_GUEST_A6 1072 /* offsetof(struct kvm_vcpu_arch, guest_context.a6) */
+#define KVM_ARCH_GUEST_A7 1080 /* offsetof(struct kvm_vcpu_arch, guest_context.a7) */
+#define KVM_ARCH_GUEST_S2 1088 /* offsetof(struct kvm_vcpu_arch, guest_context.s2) */
+#define KVM_ARCH_GUEST_S3 1096 /* offsetof(struct kvm_vcpu_arch, guest_context.s3) */
+#define KVM_ARCH_GUEST_S4 1104 /* offsetof(struct kvm_vcpu_arch, guest_context.s4) */
+#define KVM_ARCH_GUEST_S5 1112 /* offsetof(struct kvm_vcpu_arch, guest_context.s5) */
+#define KVM_ARCH_GUEST_S6 1120 /* offsetof(struct kvm_vcpu_arch, guest_context.s6) */
+#define KVM_ARCH_GUEST_S7 1128 /* offsetof(struct kvm_vcpu_arch, guest_context.s7) */
+#define KVM_ARCH_GUEST_S8 1136 /* offsetof(struct kvm_vcpu_arch, guest_context.s8) */
+#define KVM_ARCH_GUEST_S9 1144 /* offsetof(struct kvm_vcpu_arch, guest_context.s9) */
+#define KVM_ARCH_GUEST_S10 1152 /* offsetof(struct kvm_vcpu_arch, guest_context.s10) */
+#define KVM_ARCH_GUEST_S11 1160 /* offsetof(struct kvm_vcpu_arch, guest_context.s11) */
+#define KVM_ARCH_GUEST_T3 1168 /* offsetof(struct kvm_vcpu_arch, guest_context.t3) */
+#define KVM_ARCH_GUEST_T4 1176 /* offsetof(struct kvm_vcpu_arch, guest_context.t4) */
+#define KVM_ARCH_GUEST_T5 1184 /* offsetof(struct kvm_vcpu_arch, guest_context.t5) */
+#define KVM_ARCH_GUEST_T6 1192 /* offsetof(struct kvm_vcpu_arch, guest_context.t6) */
+#define KVM_ARCH_GUEST_SEPC 1200 /* offsetof(struct kvm_vcpu_arch, guest_context.sepc) */
+#define KVM_ARCH_GUEST_SSTATUS 1208 /* offsetof(struct kvm_vcpu_arch, guest_context.sstatus) */
+#define KVM_ARCH_GUEST_HSTATUS 1216 /* offsetof(struct kvm_vcpu_arch, guest_context.hstatus) */
+#define KVM_ARCH_GUEST_SCOUNTEREN 1880 /* offsetof(struct kvm_vcpu_arch, guest_csr.scounteren) */
+#define KVM_ARCH_HOST_ZERO 80 /* offsetof(struct kvm_vcpu_arch, host_context.zero) */
+#define KVM_ARCH_HOST_RA 88 /* offsetof(struct kvm_vcpu_arch, host_context.ra) */
+#define KVM_ARCH_HOST_SP 96 /* offsetof(struct kvm_vcpu_arch, host_context.sp) */
+#define KVM_ARCH_HOST_GP 104 /* offsetof(struct kvm_vcpu_arch, host_context.gp) */
+#define KVM_ARCH_HOST_TP 112 /* offsetof(struct kvm_vcpu_arch, host_context.tp) */
+#define KVM_ARCH_HOST_T0 120 /* offsetof(struct kvm_vcpu_arch, host_context.t0) */
+#define KVM_ARCH_HOST_T1 128 /* offsetof(struct kvm_vcpu_arch, host_context.t1) */
+#define KVM_ARCH_HOST_T2 136 /* offsetof(struct kvm_vcpu_arch, host_context.t2) */
+#define KVM_ARCH_HOST_S0 144 /* offsetof(struct kvm_vcpu_arch, host_context.s0) */
+#define KVM_ARCH_HOST_S1 152 /* offsetof(struct kvm_vcpu_arch, host_context.s1) */
+#define KVM_ARCH_HOST_A0 160 /* offsetof(struct kvm_vcpu_arch, host_context.a0) */
+#define KVM_ARCH_HOST_A1 168 /* offsetof(struct kvm_vcpu_arch, host_context.a1) */
+#define KVM_ARCH_HOST_A2 176 /* offsetof(struct kvm_vcpu_arch, host_context.a2) */
+#define KVM_ARCH_HOST_A3 184 /* offsetof(struct kvm_vcpu_arch, host_context.a3) */
+#define KVM_ARCH_HOST_A4 192 /* offsetof(struct kvm_vcpu_arch, host_context.a4) */
+#define KVM_ARCH_HOST_A5 200 /* offsetof(struct kvm_vcpu_arch, host_context.a5) */
+#define KVM_ARCH_HOST_A6 208 /* offsetof(struct kvm_vcpu_arch, host_context.a6) */
+#define KVM_ARCH_HOST_A7 216 /* offsetof(struct kvm_vcpu_arch, host_context.a7) */
+#define KVM_ARCH_HOST_S2 224 /* offsetof(struct kvm_vcpu_arch, host_context.s2) */
+#define KVM_ARCH_HOST_S3 232 /* offsetof(struct kvm_vcpu_arch, host_context.s3) */
+#define KVM_ARCH_HOST_S4 240 /* offsetof(struct kvm_vcpu_arch, host_context.s4) */
+#define KVM_ARCH_HOST_S5 248 /* offsetof(struct kvm_vcpu_arch, host_context.s5) */
+#define KVM_ARCH_HOST_S6 256 /* offsetof(struct kvm_vcpu_arch, host_context.s6) */
+#define KVM_ARCH_HOST_S7 264 /* offsetof(struct kvm_vcpu_arch, host_context.s7) */
+#define KVM_ARCH_HOST_S8 272 /* offsetof(struct kvm_vcpu_arch, host_context.s8) */
+#define KVM_ARCH_HOST_S9 280 /* offsetof(struct kvm_vcpu_arch, host_context.s9) */
+#define KVM_ARCH_HOST_S10 288 /* offsetof(struct kvm_vcpu_arch, host_context.s10) */
+#define KVM_ARCH_HOST_S11 296 /* offsetof(struct kvm_vcpu_arch, host_context.s11) */
+#define KVM_ARCH_HOST_T3 304 /* offsetof(struct kvm_vcpu_arch, host_context.t3) */
+#define KVM_ARCH_HOST_T4 312 /* offsetof(struct kvm_vcpu_arch, host_context.t4) */
+#define KVM_ARCH_HOST_T5 320 /* offsetof(struct kvm_vcpu_arch, host_context.t5) */
+#define KVM_ARCH_HOST_T6 328 /* offsetof(struct kvm_vcpu_arch, host_context.t6) */
+#define KVM_ARCH_HOST_SEPC 336 /* offsetof(struct kvm_vcpu_arch, host_context.sepc) */
+#define KVM_ARCH_HOST_SSTATUS 344 /* offsetof(struct kvm_vcpu_arch, host_context.sstatus) */
+#define KVM_ARCH_HOST_HSTATUS 352 /* offsetof(struct kvm_vcpu_arch, host_context.hstatus) */
+#define KVM_ARCH_HOST_SSCRATCH 40 /* offsetof(struct kvm_vcpu_arch, host_sscratch) */
+#define KVM_ARCH_HOST_STVEC 48 /* offsetof(struct kvm_vcpu_arch, host_stvec) */
+#define KVM_ARCH_HOST_SCOUNTEREN 56 /* offsetof(struct kvm_vcpu_arch, host_scounteren) */
+#define KVM_ARCH_TRAP_SEPC 0 /* offsetof(struct kvm_cpu_trap, sepc) */
+#define KVM_ARCH_TRAP_SCAUSE 8 /* offsetof(struct kvm_cpu_trap, scause) */
+#define KVM_ARCH_TRAP_STVAL 16 /* offsetof(struct kvm_cpu_trap, stval) */
+#define KVM_ARCH_TRAP_HTVAL 24 /* offsetof(struct kvm_cpu_trap, htval) */
+#define KVM_ARCH_TRAP_HTINST 32 /* offsetof(struct kvm_cpu_trap, htinst) */
+#define KVM_ARCH_FP_F_F0 288 /* offsetof(struct kvm_cpu_context, fp.f.f[0]) */
+#define KVM_ARCH_FP_F_F1 292 /* offsetof(struct kvm_cpu_context, fp.f.f[1]) */
+#define KVM_ARCH_FP_F_F2 296 /* offsetof(struct kvm_cpu_context, fp.f.f[2]) */
+#define KVM_ARCH_FP_F_F3 300 /* offsetof(struct kvm_cpu_context, fp.f.f[3]) */
+#define KVM_ARCH_FP_F_F4 304 /* offsetof(struct kvm_cpu_context, fp.f.f[4]) */
+#define KVM_ARCH_FP_F_F5 308 /* offsetof(struct kvm_cpu_context, fp.f.f[5]) */
+#define KVM_ARCH_FP_F_F6 312 /* offsetof(struct kvm_cpu_context, fp.f.f[6]) */
+#define KVM_ARCH_FP_F_F7 316 /* offsetof(struct kvm_cpu_context, fp.f.f[7]) */
+#define KVM_ARCH_FP_F_F8 320 /* offsetof(struct kvm_cpu_context, fp.f.f[8]) */
+#define KVM_ARCH_FP_F_F9 324 /* offsetof(struct kvm_cpu_context, fp.f.f[9]) */
+#define KVM_ARCH_FP_F_F10 328 /* offsetof(struct kvm_cpu_context, fp.f.f[10]) */
+#define KVM_ARCH_FP_F_F11 332 /* offsetof(struct kvm_cpu_context, fp.f.f[11]) */
+#define KVM_ARCH_FP_F_F12 336 /* offsetof(struct kvm_cpu_context, fp.f.f[12]) */
+#define KVM_ARCH_FP_F_F13 340 /* offsetof(struct kvm_cpu_context, fp.f.f[13]) */
+#define KVM_ARCH_FP_F_F14 344 /* offsetof(struct kvm_cpu_context, fp.f.f[14]) */
+#define KVM_ARCH_FP_F_F15 348 /* offsetof(struct kvm_cpu_context, fp.f.f[15]) */
+#define KVM_ARCH_FP_F_F16 352 /* offsetof(struct kvm_cpu_context, fp.f.f[16]) */
+#define KVM_ARCH_FP_F_F17 356 /* offsetof(struct kvm_cpu_context, fp.f.f[17]) */
+#define KVM_ARCH_FP_F_F18 360 /* offsetof(struct kvm_cpu_context, fp.f.f[18]) */
+#define KVM_ARCH_FP_F_F19 364 /* offsetof(struct kvm_cpu_context, fp.f.f[19]) */
+#define KVM_ARCH_FP_F_F20 368 /* offsetof(struct kvm_cpu_context, fp.f.f[20]) */
+#define KVM_ARCH_FP_F_F21 372 /* offsetof(struct kvm_cpu_context, fp.f.f[21]) */
+#define KVM_ARCH_FP_F_F22 376 /* offsetof(struct kvm_cpu_context, fp.f.f[22]) */
+#define KVM_ARCH_FP_F_F23 380 /* offsetof(struct kvm_cpu_context, fp.f.f[23]) */
+#define KVM_ARCH_FP_F_F24 384 /* offsetof(struct kvm_cpu_context, fp.f.f[24]) */
+#define KVM_ARCH_FP_F_F25 388 /* offsetof(struct kvm_cpu_context, fp.f.f[25]) */
+#define KVM_ARCH_FP_F_F26 392 /* offsetof(struct kvm_cpu_context, fp.f.f[26]) */
+#define KVM_ARCH_FP_F_F27 396 /* offsetof(struct kvm_cpu_context, fp.f.f[27]) */
+#define KVM_ARCH_FP_F_F28 400 /* offsetof(struct kvm_cpu_context, fp.f.f[28]) */
+#define KVM_ARCH_FP_F_F29 404 /* offsetof(struct kvm_cpu_context, fp.f.f[29]) */
+#define KVM_ARCH_FP_F_F30 408 /* offsetof(struct kvm_cpu_context, fp.f.f[30]) */
+#define KVM_ARCH_FP_F_F31 412 /* offsetof(struct kvm_cpu_context, fp.f.f[31]) */
+#define KVM_ARCH_FP_F_FCSR 416 /* offsetof(struct kvm_cpu_context, fp.f.fcsr) */
+#define KVM_ARCH_FP_D_F0 288 /* offsetof(struct kvm_cpu_context, fp.d.f[0]) */
+#define KVM_ARCH_FP_D_F1 296 /* offsetof(struct kvm_cpu_context, fp.d.f[1]) */
+#define KVM_ARCH_FP_D_F2 304 /* offsetof(struct kvm_cpu_context, fp.d.f[2]) */
+#define KVM_ARCH_FP_D_F3 312 /* offsetof(struct kvm_cpu_context, fp.d.f[3]) */
+#define KVM_ARCH_FP_D_F4 320 /* offsetof(struct kvm_cpu_context, fp.d.f[4]) */
+#define KVM_ARCH_FP_D_F5 328 /* offsetof(struct kvm_cpu_context, fp.d.f[5]) */
+#define KVM_ARCH_FP_D_F6 336 /* offsetof(struct kvm_cpu_context, fp.d.f[6]) */
+#define KVM_ARCH_FP_D_F7 344 /* offsetof(struct kvm_cpu_context, fp.d.f[7]) */
+#define KVM_ARCH_FP_D_F8 352 /* offsetof(struct kvm_cpu_context, fp.d.f[8]) */
+#define KVM_ARCH_FP_D_F9 360 /* offsetof(struct kvm_cpu_context, fp.d.f[9]) */
+#define KVM_ARCH_FP_D_F10 368 /* offsetof(struct kvm_cpu_context, fp.d.f[10]) */
+#define KVM_ARCH_FP_D_F11 376 /* offsetof(struct kvm_cpu_context, fp.d.f[11]) */
+#define KVM_ARCH_FP_D_F12 384 /* offsetof(struct kvm_cpu_context, fp.d.f[12]) */
+#define KVM_ARCH_FP_D_F13 392 /* offsetof(struct kvm_cpu_context, fp.d.f[13]) */
+#define KVM_ARCH_FP_D_F14 400 /* offsetof(struct kvm_cpu_context, fp.d.f[14]) */
+#define KVM_ARCH_FP_D_F15 408 /* offsetof(struct kvm_cpu_context, fp.d.f[15]) */
+#define KVM_ARCH_FP_D_F16 416 /* offsetof(struct kvm_cpu_context, fp.d.f[16]) */
+#define KVM_ARCH_FP_D_F17 424 /* offsetof(struct kvm_cpu_context, fp.d.f[17]) */
+#define KVM_ARCH_FP_D_F18 432 /* offsetof(struct kvm_cpu_context, fp.d.f[18]) */
+#define KVM_ARCH_FP_D_F19 440 /* offsetof(struct kvm_cpu_context, fp.d.f[19]) */
+#define KVM_ARCH_FP_D_F20 448 /* offsetof(struct kvm_cpu_context, fp.d.f[20]) */
+#define KVM_ARCH_FP_D_F21 456 /* offsetof(struct kvm_cpu_context, fp.d.f[21]) */
+#define KVM_ARCH_FP_D_F22 464 /* offsetof(struct kvm_cpu_context, fp.d.f[22]) */
+#define KVM_ARCH_FP_D_F23 472 /* offsetof(struct kvm_cpu_context, fp.d.f[23]) */
+#define KVM_ARCH_FP_D_F24 480 /* offsetof(struct kvm_cpu_context, fp.d.f[24]) */
+#define KVM_ARCH_FP_D_F25 488 /* offsetof(struct kvm_cpu_context, fp.d.f[25]) */
+#define KVM_ARCH_FP_D_F26 496 /* offsetof(struct kvm_cpu_context, fp.d.f[26]) */
+#define KVM_ARCH_FP_D_F27 504 /* offsetof(struct kvm_cpu_context, fp.d.f[27]) */
+#define KVM_ARCH_FP_D_F28 512 /* offsetof(struct kvm_cpu_context, fp.d.f[28]) */
+#define KVM_ARCH_FP_D_F29 520 /* offsetof(struct kvm_cpu_context, fp.d.f[29]) */
+#define KVM_ARCH_FP_D_F30 528 /* offsetof(struct kvm_cpu_context, fp.d.f[30]) */
+#define KVM_ARCH_FP_D_F31 536 /* offsetof(struct kvm_cpu_context, fp.d.f[31]) */
+#define KVM_ARCH_FP_D_FCSR 544 /* offsetof(struct kvm_cpu_context, fp.d.fcsr) */
+#define TASK_THREAD_RA_RA 0 /* offsetof(struct task_struct, thread.ra) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_SP_RA 8 /* offsetof(struct task_struct, thread.sp) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S0_RA 16 /* offsetof(struct task_struct, thread.s[0]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S1_RA 24 /* offsetof(struct task_struct, thread.s[1]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S2_RA 32 /* offsetof(struct task_struct, thread.s[2]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S3_RA 40 /* offsetof(struct task_struct, thread.s[3]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S4_RA 48 /* offsetof(struct task_struct, thread.s[4]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S5_RA 56 /* offsetof(struct task_struct, thread.s[5]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S6_RA 64 /* offsetof(struct task_struct, thread.s[6]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S7_RA 72 /* offsetof(struct task_struct, thread.s[7]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S8_RA 80 /* offsetof(struct task_struct, thread.s[8]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S9_RA 88 /* offsetof(struct task_struct, thread.s[9]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S10_RA 96 /* offsetof(struct task_struct, thread.s[10]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_S11_RA 104 /* offsetof(struct task_struct, thread.s[11]) - offsetof(struct task_struct, thread.ra) */
+#define TASK_THREAD_F0_F0 0 /* offsetof(struct task_struct, thread.fstate.f[0]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F1_F0 8 /* offsetof(struct task_struct, thread.fstate.f[1]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F2_F0 16 /* offsetof(struct task_struct, thread.fstate.f[2]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F3_F0 24 /* offsetof(struct task_struct, thread.fstate.f[3]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F4_F0 32 /* offsetof(struct task_struct, thread.fstate.f[4]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F5_F0 40 /* offsetof(struct task_struct, thread.fstate.f[5]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F6_F0 48 /* offsetof(struct task_struct, thread.fstate.f[6]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F7_F0 56 /* offsetof(struct task_struct, thread.fstate.f[7]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F8_F0 64 /* offsetof(struct task_struct, thread.fstate.f[8]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F9_F0 72 /* offsetof(struct task_struct, thread.fstate.f[9]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F10_F0 80 /* offsetof(struct task_struct, thread.fstate.f[10]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F11_F0 88 /* offsetof(struct task_struct, thread.fstate.f[11]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F12_F0 96 /* offsetof(struct task_struct, thread.fstate.f[12]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F13_F0 104 /* offsetof(struct task_struct, thread.fstate.f[13]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F14_F0 112 /* offsetof(struct task_struct, thread.fstate.f[14]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F15_F0 120 /* offsetof(struct task_struct, thread.fstate.f[15]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F16_F0 128 /* offsetof(struct task_struct, thread.fstate.f[16]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F17_F0 136 /* offsetof(struct task_struct, thread.fstate.f[17]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F18_F0 144 /* offsetof(struct task_struct, thread.fstate.f[18]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F19_F0 152 /* offsetof(struct task_struct, thread.fstate.f[19]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F20_F0 160 /* offsetof(struct task_struct, thread.fstate.f[20]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F21_F0 168 /* offsetof(struct task_struct, thread.fstate.f[21]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F22_F0 176 /* offsetof(struct task_struct, thread.fstate.f[22]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F23_F0 184 /* offsetof(struct task_struct, thread.fstate.f[23]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F24_F0 192 /* offsetof(struct task_struct, thread.fstate.f[24]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F25_F0 200 /* offsetof(struct task_struct, thread.fstate.f[25]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F26_F0 208 /* offsetof(struct task_struct, thread.fstate.f[26]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F27_F0 216 /* offsetof(struct task_struct, thread.fstate.f[27]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F28_F0 224 /* offsetof(struct task_struct, thread.fstate.f[28]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F29_F0 232 /* offsetof(struct task_struct, thread.fstate.f[29]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F30_F0 240 /* offsetof(struct task_struct, thread.fstate.f[30]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_F31_F0 248 /* offsetof(struct task_struct, thread.fstate.f[31]) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define TASK_THREAD_FCSR_F0 256 /* offsetof(struct task_struct, thread.fstate.fcsr) - offsetof(struct task_struct, thread.fstate.f[0]) */
+#define PT_SIZE_ON_STACK 288 /* ALIGN(sizeof(struct pt_regs), STACK_ALIGN) */
+#define KERNEL_MAP_VIRT_ADDR 8 /* offsetof(struct kernel_mapping, virt_addr) */
+#define SBI_HART_BOOT_TASK_PTR_OFFSET 0 /* offsetof(struct sbi_hart_boot_data, task_ptr) */
+#define SBI_HART_BOOT_STACK_PTR_OFFSET 8 /* offsetof(struct sbi_hart_boot_data, stack_ptr) */
+#define STACKFRAME_SIZE_ON_STACK 16 /* ALIGN(sizeof(struct stackframe), STACK_ALIGN) */
+#define STACKFRAME_FP 0 /* offsetof(struct stackframe, fp) */
+#define STACKFRAME_RA 8 /* offsetof(struct stackframe, ra) */
+
+#endif
diff --git a/riscv/include/generated/autoconf.h b/riscv/include/generated/autoconf.h
new file mode 100644
index 0000000..19d1ca5
--- /dev/null
+++ b/riscv/include/generated/autoconf.h
@@ -0,0 +1,1555 @@
+/*
+ * Automatically generated file; DO NOT EDIT.
+ * Linux/riscv 6.7.3 Kernel Configuration
+ */
+#define CONFIG_HAVE_ARCH_SECCOMP_FILTER 1
+#define CONFIG_SND_PROC_FS 1
+#define CONFIG_SCSI_DMA 1
+#define CONFIG_NETFILTER_FAMILY_BRIDGE 1
+#define CONFIG_CC_HAS_SANCOV_TRACE_PC 1
+#define CONFIG_DEFAULT_INIT ""
+#define CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT 1
+#define CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE 1
+#define CONFIG_INPUT_KEYBOARD 1
+#define CONFIG_INET_TABLE_PERTURB_ORDER 16
+#define CONFIG_ARCH_SUPPORTS_INT128 1
+#define CONFIG_SLUB_CPU_PARTIAL 1
+#define CONFIG_RFS_ACCEL 1
+#define CONFIG_IP_NF_TARGET_REDIRECT_MODULE 1
+#define CONFIG_ARCH_WANTS_THP_SWAP 1
+#define CONFIG_CRC32 1
+#define CONFIG_I2C_BOARDINFO 1
+#define CONFIG_RESET_STARFIVE_JH7110 1
+#define CONFIG_MEMREGION 1
+#define CONFIG_PNFS_FLEXFILE_LAYOUT 1
+#define CONFIG_USB_CONFIGFS_NCM 1
+#define CONFIG_DRM_NOUVEAU_MODULE 1
+#define CONFIG_CLK_STARFIVE_JH7110_PLL 1
+#define CONFIG_PCI_ECAM 1
+#define CONFIG_SECCOMP 1
+#define CONFIG_CPU_FREQ_GOV_CONSERVATIVE_MODULE 1
+#define CONFIG_HIGH_RES_TIMERS 1
+#define CONFIG_SERIAL_SH_SCI_EARLYCON 1
+#define CONFIG_DT_IDLE_GENPD 1
+#define CONFIG_ARCH_HAS_SET_MEMORY 1
+#define CONFIG_CC_HAVE_STACKPROTECTOR_TLS 1
+#define CONFIG_BLK_DEV_DM_MODULE 1
+#define CONFIG_VLAN_8021Q_MODULE 1
+#define CONFIG_GCC11_NO_ARRAY_BOUNDS 1
+#define CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE 1
+#define CONFIG_FIX_EARLYCON_MEM 1
+#define CONFIG_ARCH_DMA_DEFAULT_COHERENT 1
+#define CONFIG_INOTIFY_USER 1
+#define CONFIG_HDMI 1
+#define CONFIG_NETWORK_FILESYSTEMS 1
+#define CONFIG_SATA_AHCI_PLATFORM 1
+#define CONFIG_CPU_FREQ_GOV_ONDEMAND 1
+#define CONFIG_ERRATA_THEAD 1
+#define CONFIG_FB_CORE 1
+#define CONFIG_GLOB 1
+#define CONFIG_ARCH_WANT_LD_ORPHAN_WARN 1
+#define CONFIG_CGROUP_DEVICE 1
+#define CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC 1
+#define CONFIG_ARCH_SUSPEND_POSSIBLE 1
+#define CONFIG_MMU_LAZY_TLB_REFCOUNT 1
+#define CONFIG_MAC80211_STA_HASH_MAX_SIZE 0
+#define CONFIG_HAVE_ARCH_MMAP_RND_BITS 1
+#define CONFIG_PNPACPI 1
+#define CONFIG_CPU_FREQ_GOV_ATTR_SET 1
+#define CONFIG_EXT4_FS_POSIX_ACL 1
+#define CONFIG_PHYLINK 1
+#define CONFIG_ZSTD_COMPRESS 1
+#define CONFIG_SSB_POSSIBLE 1
+#define CONFIG_NFS_V4_2 1
+#define CONFIG_USB_F_EEM_MODULE 1
+#define CONFIG_MMU_NOTIFIER 1
+#define CONFIG_DRM_RADEON_MODULE 1
+#define CONFIG_SPI_DYNAMIC 1
+#define CONFIG_IP_NF_NAT_MODULE 1
+#define CONFIG_USB_XHCI_RCAR 1
+#define CONFIG_USB_OHCI_LITTLE_ENDIAN 1
+#define CONFIG_NET_SCH_FIFO 1
+#define CONFIG_SWPHY 1
+#define CONFIG_FSNOTIFY 1
+#define CONFIG_BLK_DEV_LOOP_MIN_COUNT 8
+#define CONFIG_STP_MODULE 1
+#define CONFIG_ARCH_FLATMEM_ENABLE 1
+#define CONFIG_CRYPTO_MANAGER_DISABLE_TESTS 1
+#define CONFIG_GENERIC_SMP_IDLE_THREAD 1
+#define CONFIG_NET_VENDOR_QUALCOMM 1
+#define CONFIG_RZG2L_THERMAL 1
+#define CONFIG_RTC_DRV_SUN6I 1
+#define CONFIG_ARCH_SUPPORTS_CRASH_DUMP 1
+#define CONFIG_NET_VENDOR_EZCHIP 1
+#define CONFIG_DEFAULT_SECURITY_DAC 1
+#define CONFIG_LDISC_AUTOLOAD 1
+#define CONFIG_USB_CONFIGFS_OBEX 1
+#define CONFIG_IP_VS_NFCT 1
+#define CONFIG_RT_GROUP_SCHED 1
+#define CONFIG_USB_AUTOSUSPEND_DELAY 2
+#define CONFIG_IP6_NF_MANGLE_MODULE 1
+#define CONFIG_HAVE_IRQ_TIME_ACCOUNTING 1
+#define CONFIG_IP_VS_RR_MODULE 1
+#define CONFIG_IPV6 1
+#define CONFIG_HAVE_STACKPROTECTOR 1
+#define CONFIG_NET_9P 1
+#define CONFIG_NET_HANDSHAKE 1
+#define CONFIG_CRYPTO_AEAD 1
+#define CONFIG_COMPAT 1
+#define CONFIG_BQL 1
+#define CONFIG_DEFAULT_TCP_CONG "cubic"
+#define CONFIG_USB_UAS 1
+#define CONFIG_DEVTMPFS 1
+#define CONFIG_OF_IOMMU 1
+#define CONFIG_PNFS_FILE_LAYOUT 1
+#define CONFIG_SUNRPC_BACKCHANNEL 1
+#define CONFIG_IP6_NF_TARGET_REJECT_MODULE 1
+#define CONFIG_CPUFREQ_DT 1
+#define CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX 17
+#define CONFIG_ARCH_SPARSEMEM_ENABLE 1
+#define CONFIG_RAVB 1
+#define CONFIG_NF_NAT_REDIRECT 1
+#define CONFIG_HOTPLUG_CPU 1
+#define CONFIG_WLAN 1
+#define CONFIG_NAMESPACES 1
+#define CONFIG_ARCH_USE_MEMREMAP_PROT 1
+#define CONFIG_NFS_V4_2_READ_PLUS 1
+#define CONFIG_HAVE_ARCH_HUGE_VMAP 1
+#define CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY 1
+#define CONFIG_DRM_GEM_DMA_HELPER_MODULE 1
+#define CONFIG_OF_PMEM 1
+#define CONFIG_USB_CONFIGFS_MODULE 1
+#define CONFIG_RISCV_ISA_V 1
+#define CONFIG_BLK_DEV_BSG 1
+#define CONFIG_RISCV_ISA_ZICBOM 1
+#define CONFIG_INTEGRITY 1
+#define CONFIG_DEBUG_RT_MUTEXES 1
+#define CONFIG_LEGACY_PTYS 1
+#define CONFIG_CRYPTO_DRBG_MENU_MODULE 1
+#define CONFIG_CRYPTO_RNG2 1
+#define CONFIG_MSDOS_FS 1
+#define CONFIG_USB_U_SERIAL_MODULE 1
+#define CONFIG_NET_CLS_CGROUP_MODULE 1
+#define CONFIG_WLAN_VENDOR_MICROCHIP 1
+#define CONFIG_NET_VENDOR_DAVICOM 1
+#define CONFIG_SOFTIRQ_ON_OWN_STACK 1
+#define CONFIG_CAN_MODULE 1
+#define CONFIG_PAGE_SIZE_LESS_THAN_256KB 1
+#define CONFIG_GENERIC_PINCTRL_GROUPS 1
+#define CONFIG_OF_RESERVED_MEM 1
+#define CONFIG_CLK_STARFIVE_JH7110_STG_MODULE 1
+#define CONFIG_SERIAL_8250 1
+#define CONFIG_LZO_DECOMPRESS 1
+#define CONFIG_IOMMU_SUPPORT 1
+#define CONFIG_STMMAC_PLATFORM_MODULE 1
+#define CONFIG_SUN4I_TIMER 1
+#define CONFIG_WLAN_VENDOR_CISCO 1
+#define CONFIG_COMPAT_BINFMT_ELF 1
+#define CONFIG_RD_LZMA 1
+#define CONFIG_USB 1
+#define CONFIG_MODULES_USE_ELF_RELA 1
+#define CONFIG_CRYPTO_HMAC_MODULE 1
+#define CONFIG_WLAN_VENDOR_REALTEK 1
+#define CONFIG_ETHERNET 1
+#define CONFIG_CRYPTO_SHA3_MODULE 1
+#define CONFIG_CRC_ITU_T 1
+#define CONFIG_HAVE_DMA_CONTIGUOUS 1
+#define CONFIG_DQL 1
+#define CONFIG_VXLAN_MODULE 1
+#define CONFIG_FRAMEBUFFER_CONSOLE 1
+#define CONFIG_CLK_STARFIVE_JH71X0 1
+#define CONFIG_SOCK_CGROUP_DATA 1
+#define CONFIG_COREDUMP 1
+#define CONFIG_DRM_SUBALLOC_HELPER_MODULE 1
+#define CONFIG_BCMA_POSSIBLE 1
+#define CONFIG_USB_CONFIGFS_RNDIS 1
+#define CONFIG_NF_LOG_IPV4_MODULE 1
+#define CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO 1
+#define CONFIG_VGA_ARB 1
+#define CONFIG_SATA_HOST 1
+#define CONFIG_SND_SOC 1
+#define CONFIG_SCSI_COMMON 1
+#define CONFIG_PRINTK 1
+#define CONFIG_FB_SYS_FILLRECT 1
+#define CONFIG_TIMERFD 1
+#define CONFIG_DNS_RESOLVER 1
+#define CONFIG_FIRMWARE_TABLE 1
+#define CONFIG_MTD_CFI_I2 1
+#define CONFIG_CRYPTO_AUTHENC_MODULE 1
+#define CONFIG_RISCV_ISA_C 1
+#define CONFIG_ARCH_HAS_SYSCALL_WRAPPER 1
+#define CONFIG_DWMAC_GENERIC_MODULE 1
+#define CONFIG_COMPAT_32BIT_TIME 1
+#define CONFIG_SECURITY_APPARMOR_HASH 1
+#define CONFIG_SHMEM 1
+#define CONFIG_MTD 1
+#define CONFIG_MIGRATION 1
+#define CONFIG_HAVE_ARCH_JUMP_LABEL 1
+#define CONFIG_BUILD_SALT ""
+#define CONFIG_MMC_BLOCK_MINORS 8
+#define CONFIG_DECOMPRESS_LZMA 1
+#define CONFIG_HAVE_KVM_EVENTFD 1
+#define CONFIG_DEVTMPFS_MOUNT 1
+#define CONFIG_SERIAL_SH_SCI_NR_UARTS 18
+#define CONFIG_HAVE_PREEMPT_DYNAMIC 1
+#define CONFIG_DNOTIFY 1
+#define CONFIG_ERRATA_THEAD_PMU 1
+#define CONFIG_INPUT_MOUSEDEV 1
+#define CONFIG_GENERIC_NET_UTILS 1
+#define CONFIG_ATA 1
+#define CONFIG_GPIOLIB_FASTPATH_LIMIT 512
+#define CONFIG_ND_BTT 1
+#define CONFIG_NLS_CODEPAGE_437 1
+#define CONFIG_PATA_TIMINGS 1
+#define CONFIG_ARCH_PROC_KCORE_TEXT 1
+#define CONFIG_EXPORTFS 1
+#define CONFIG_NET_INGRESS 1
+#define CONFIG_HAVE_FUNCTION_ERROR_INJECTION 1
+#define CONFIG_SERIO 1
+#define CONFIG_INPUT_MOUSE 1
+#define CONFIG_FB_SYS_IMAGEBLIT 1
+#define CONFIG_SUNRPC_GSS 1
+#define CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS 1
+#define CONFIG_KCMP 1
+#define CONFIG_RTC_INTF_SYSFS 1
+#define CONFIG_GCC_PLUGINS 1
+#define CONFIG_CPU_FREQ_GOV_COMMON 1
+#define CONFIG_BLK_DEV_INITRD 1
+#define CONFIG_MMC_SUNXI 1
+#define CONFIG_DM_BUFIO_MODULE 1
+#define CONFIG_PCPU_DEV_REFCNT 1
+#define CONFIG_DRM_VIRTIO_GPU_MODULE 1
+#define CONFIG_FB_SYSMEM_HELPERS 1
+#define CONFIG_USB_OTG 1
+#define CONFIG_PREEMPT_NOTIFIERS 1
+#define CONFIG_NF_CT_PROTO_DCCP 1
+#define CONFIG_ZLIB_INFLATE 1
+#define CONFIG_NET_VENDOR_SYNOPSYS 1
+#define CONFIG_THERMAL_OF 1
+#define CONFIG_HWMON 1
+#define CONFIG_NET_VENDOR_DLINK 1
+#define CONFIG_AUDITSYSCALL 1
+#define CONFIG_USB_PHY 1
+#define CONFIG_IP_PNP 1
+#define CONFIG_RISCV_SBI 1
+#define CONFIG_RTC_INTF_PROC 1
+#define CONFIG_PM_CLK 1
+#define CONFIG_CC_IMPLICIT_FALLTHROUGH "-Wimplicit-fallthrough=5"
+#define CONFIG_CPU_IDLE_GOV_MENU 1
+#define CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL 1
+#define CONFIG_ACPI_FAN 1
+#define CONFIG_SECURITY_APPARMOR 1
+#define CONFIG_STACKTRACE_SUPPORT 1
+#define CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP 1
+#define CONFIG_SERIAL_8250_PCILIB 1
+#define CONFIG_RESET_CONTROLLER 1
+#define CONFIG_ACPI 1
+#define CONFIG_LOCKD 1
+#define CONFIG_USB_F_FS_MODULE 1
+#define CONFIG_PCIE_DW_HOST 1
+#define CONFIG_CRYPTO_LIB_AES_MODULE 1
+#define CONFIG_WLAN_VENDOR_RALINK 1
+#define CONFIG_CRYPTO_KPP2 1
+#define CONFIG_NET_VENDOR_MICROCHIP 1
+#define CONFIG_FUNCTION_ALIGNMENT 0
+#define CONFIG_SOC_RENESAS 1
+#define CONFIG_PCI_HOST_GENERIC 1
+#define CONFIG_NET_UDP_TUNNEL_MODULE 1
+#define CONFIG_RPCSEC_GSS_KRB5 1
+#define CONFIG_MTD_CFI_UTIL 1
+#define CONFIG_NO_HZ_IDLE 1
+#define CONFIG_NET_VENDOR_ADAPTEC 1
+#define CONFIG_MOUSE_PS2_BYD 1
+#define CONFIG_SERIAL_SH_SCI 1
+#define CONFIG_SOCK_RX_QUEUE_MAPPING 1
+#define CONFIG_CRYPTO_DRBG_HMAC 1
+#define CONFIG_DRM_BRIDGE 1
+#define CONFIG_FB_DEVICE 1
+#define CONFIG_USB_F_SERIAL_MODULE 1
+#define CONFIG_NET_VENDOR_SILAN 1
+#define CONFIG_PHY_RCAR_GEN3_USB2 1
+#define CONFIG_PINCTRL_STARFIVE_JH7110_AON 1
+#define CONFIG_RISCV_TIMER 1
+#define CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK 1
+#define CONFIG_USB_STORAGE 1
+#define CONFIG_NET_VENDOR_BROADCOM 1
+#define CONFIG_FPU 1
+#define CONFIG_STANDALONE 1
+#define CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE 1
+#define CONFIG_CPU_FREQ_GOV_PERFORMANCE 1
+#define CONFIG_EFI 1
+#define CONFIG_RATIONAL 1
+#define CONFIG_WLAN_VENDOR_INTEL 1
+#define CONFIG_WATCHDOG_CORE 1
+#define CONFIG_GENERIC_EARLY_IOREMAP 1
+#define CONFIG_NET_VENDOR_WANGXUN 1
+#define CONFIG_NET_L3_MASTER_DEV 1
+#define CONFIG_VMAP_STACK 1
+#define CONFIG_BLOCK 1
+#define CONFIG_ARCH_STACKWALK 1
+#define CONFIG_INIT_ENV_ARG_LIMIT 32
+#define CONFIG_ROOT_NFS 1
+#define CONFIG_AF_UNIX_OOB 1
+#define CONFIG_USER_NS 1
+#define CONFIG_TMPFS_POSIX_ACL 1
+#define CONFIG_STRICT_KERNEL_RWX 1
+#define CONFIG_NETLINK_DIAG 1
+#define CONFIG_BUG 1
+#define CONFIG_ARCH_HAS_DEBUG_WX 1
+#define CONFIG_PCIE_XILINX 1
+#define CONFIG_PM 1
+#define CONFIG_MEMCG 1
+#define CONFIG_SPI 1
+#define CONFIG_RTC_SYSTOHC_DEVICE "rtc0"
+#define CONFIG_NOUVEAU_DEBUG_DEFAULT 3
+#define CONFIG_OF_IRQ 1
+#define CONFIG_LIBFDT 1
+#define CONFIG_NET_FAILOVER 1
+#define CONFIG_WLAN_VENDOR_PURELIFI 1
+#define CONFIG_IO_URING 1
+#define CONFIG_VT 1
+#define CONFIG_STARFIVE_WATCHDOG 1
+#define CONFIG_SECRETMEM 1
+#define CONFIG_DTC 1
+#define CONFIG_MACB 1
+#define CONFIG_REGMAP_SPI 1
+#define CONFIG_DMA_ACPI 1
+#define CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED 1
+#define CONFIG_RESET_STARFIVE_JH71X0 1
+#define CONFIG_SPLIT_PTLOCK_CPUS 4
+#define CONFIG_SBITMAP 1
+#define CONFIG_MCHP_CLK_MPFS 1
+#define CONFIG_POWER_SUPPLY 1
+#define CONFIG_DM_PERSISTENT_DATA_MODULE 1
+#define CONFIG_CRYPTO_SKCIPHER2 1
+#define CONFIG_NLS 1
+#define CONFIG_AS_IS_GNU 1
+#define CONFIG_MICROSEMI_PHY 1
+#define CONFIG_USB_CONFIGFS_ACM 1
+#define CONFIG_CGROUP_BPF 1
+#define CONFIG_CPU_THERMAL 1
+#define CONFIG_IRQ_WORK 1
+#define CONFIG_PCI_MSI 1
+#define CONFIG_IP_ADVANCED_ROUTER 1
+#define CONFIG_FB_SYS_COPYAREA 1
+#define CONFIG_USB_EHCI_PCI 1
+#define CONFIG_SPARSEMEM_EXTREME 1
+#define CONFIG_USB_COMMON 1
+#define CONFIG_DRM_DISPLAY_HDMI_HELPER 1
+#define CONFIG_IP6_NF_IPTABLES_MODULE 1
+#define CONFIG_DRM_GPUVM_MODULE 1
+#define CONFIG_VIRTIO_ANCHOR 1
+#define CONFIG_DEBUG_INFO_NONE 1
+#define CONFIG_FIXED_PHY 1
+#define CONFIG_SOC_SIFIVE 1
+#define CONFIG_GPIO_ACPI 1
+#define CONFIG_CPU_FREQ_GOV_USERSPACE 1
+#define CONFIG_LOG_CPU_MAX_BUF_SHIFT 12
+#define CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE 32
+#define CONFIG_BLK_DEV_DM_BUILTIN 1
+#define CONFIG_DRM_SUN8I_TCON_TOP_MODULE 1
+#define CONFIG_VGA_ARB_MAX_GPUS 16
+#define CONFIG_GENERIC_PINCONF 1
+#define CONFIG_DEBUG_SG 1
+#define CONFIG_NFS_V4_2_SSC_HELPER 1
+#define CONFIG_ARCH_HAS_BINFMT_FLAT 1
+#define CONFIG_SG_POOL 1
+#define CONFIG_DRM_KMS_HELPER_MODULE 1
+#define CONFIG_NET_VENDOR_PACKET_ENGINES 1
+#define CONFIG_DMA_COHERENT_POOL 1
+#define CONFIG_TOOLCHAIN_HAS_ZBB 1
+#define CONFIG_BLK_MQ_PCI 1
+#define CONFIG_CPU_FREQ_THERMAL 1
+#define CONFIG_RISCV_PMU_LEGACY 1
+#define CONFIG_NLS_ISO8859_1_MODULE 1
+#define CONFIG_R8169 1
+#define CONFIG_MMC_SDHI 1
+#define CONFIG_USB_EHCI_HCD 1
+#define CONFIG_FS_IOMAP 1
+#define CONFIG_CAN_CALC_BITTIMING 1
+#define CONFIG_COMPACT_UNEVICTABLE_DEFAULT 1
+#define CONFIG_RD_ZSTD 1
+#define CONFIG_I2C_RIIC_MODULE 1
+#define CONFIG_NETDEVICES 1
+#define CONFIG_ARCH_HAS_KCOV 1
+#define CONFIG_CGROUP_FREEZER 1
+#define CONFIG_SPI_SUN6I 1
+#define CONFIG_EVENTFD 1
+#define CONFIG_PHY_SUN4I_USB_MODULE 1
+#define CONFIG_DEBUG_RWSEMS 1
+#define CONFIG_FS_POSIX_ACL 1
+#define CONFIG_IPV6_SIT 1
+#define CONFIG_XFRM 1
+#define CONFIG_ARCH_HAS_PMEM_API 1
+#define CONFIG_LINEAR_RANGES 1
+#define CONFIG_HAVE_KPROBES_ON_FTRACE 1
+#define CONFIG_SERIAL_8250_CONSOLE 1
+#define CONFIG_CRYPTO_GENIV_MODULE 1
+#define CONFIG_JUMP_LABEL 1
+#define CONFIG_OVERLAY_FS_MODULE 1
+#define CONFIG_IP_NF_TARGET_MASQUERADE_MODULE 1
+#define CONFIG_HAVE_EBPF_JIT 1
+#define CONFIG_PROC_PAGE_MONITOR 1
+#define CONFIG_USB_CONFIGFS_ECM 1
+#define CONFIG_NETFILTER_XT_TARGET_MASQUERADE_MODULE 1
+#define CONFIG_MTD_SPI_NOR_USE_4K_SECTORS 1
+#define CONFIG_BPF 1
+#define CONFIG_DWMAC_SUN8I_MODULE 1
+#define CONFIG_RD_LZO 1
+#define CONFIG_HAVE_FUNCTION_GRAPH_RETVAL 1
+#define CONFIG_CRYPTO_SHA512_MODULE 1
+#define CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE 1
+#define CONFIG_CC_HAS_ASM_INLINE 1
+#define CONFIG_MTD_OF_PARTS 1
+#define CONFIG_CRYPTO_NULL_MODULE 1
+#define CONFIG_GPIO_CDEV_V1 1
+#define CONFIG_NET_VENDOR_SEEQ 1
+#define CONFIG_NF_DEFRAG_IPV4_MODULE 1
+#define CONFIG_SELECT_MEMORY_MODEL 1
+#define CONFIG_VIRTIO_CONSOLE 1
+#define CONFIG_NETFILTER_ADVANCED 1
+#define CONFIG_GENERIC_STRNLEN_USER 1
+#define CONFIG_MTD_CFI 1
+#define CONFIG_RPMSG_VIRTIO 1
+#define CONFIG_WLAN_VENDOR_RSI 1
+#define CONFIG_CRYPTO_JITTERENTROPY_MODULE 1
+#define CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION 1
+#define CONFIG_CRYPTO_GCM_MODULE 1
+#define CONFIG_HAVE_DYNAMIC_FTRACE 1
+#define CONFIG_CDROM 1
+#define CONFIG_USB_CONFIGFS_F_FS 1
+#define CONFIG_PNFS_BLOCK_MODULE 1
+#define CONFIG_NET_VENDOR_RDC 1
+#define CONFIG_PGTABLE_LEVELS 5
+#define CONFIG_POWER_RESET_SYSCON 1
+#define CONFIG_CPUSETS 1
+#define CONFIG_ARCH_HAS_VDSO_DATA 1
+#define CONFIG_SPARSE_IRQ 1
+#define CONFIG_IP_NF_MANGLE_MODULE 1
+#define CONFIG_DT_IDLE_STATES 1
+#define CONFIG_SECURITYFS 1
+#define CONFIG_RCU_STALL_COMMON 1
+#define CONFIG_PCIEPORTBUS 1
+#define CONFIG_DEBUG_BUGVERBOSE 1
+#define CONFIG_EFI_GENERIC_STUB 1
+#define CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK 1
+#define CONFIG_IP_NF_FILTER_MODULE 1
+#define CONFIG_MODULES_TREE_LOOKUP 1
+#define CONFIG_FAT_FS 1
+#define CONFIG_BUILDTIME_TABLE_SORT 1
+#define CONFIG_NVMEM 1
+#define CONFIG_INET_TUNNEL 1
+#define CONFIG_NF_LOG_ARP_MODULE 1
+#define CONFIG_NET_9P_VIRTIO 1
+#define CONFIG_PINCONF 1
+#define CONFIG_BLOCK_HOLDER_DEPRECATED 1
+#define CONFIG_GENERIC_CLOCKEVENTS 1
+#define CONFIG_OID_REGISTRY 1
+#define CONFIG_DWMAC_STARFIVE_MODULE 1
+#define CONFIG_CAN_NETLINK 1
+#define CONFIG_CONSOLE_TRANSLATIONS 1
+#define CONFIG_ARCH_SUPPORTS_ATOMIC_RMW 1
+#define CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED 1
+#define CONFIG_RAID6_PQ_MODULE 1
+#define CONFIG_SUN6I_RTC_CCU 1
+#define CONFIG_JH71XX_PMU 1
+#define CONFIG_SERIAL_EARLYCON 1
+#define CONFIG_ARCH_THEAD 1
+#define CONFIG_NET_VENDOR_NI 1
+#define CONFIG_CRYPTO_AKCIPHER 1
+#define CONFIG_MMIOWB 1
+#define CONFIG_ETHTOOL_NETLINK 1
+#define CONFIG_CPU_FREQ 1
+#define CONFIG_USB_OHCI_HCD 1
+#define CONFIG_ARCH_SUPPORTS_KEXEC_FILE 1
+#define CONFIG_DUMMY_CONSOLE 1
+#define CONFIG_USB_PCI 1
+#define CONFIG_NF_REJECT_IPV4_MODULE 1
+#define CONFIG_GENERIC_IOREMAP 1
+#define CONFIG_ARCH_MMAP_RND_BITS_MAX 24
+#define CONFIG_MMC_DW_STARFIVE 1
+#define CONFIG_GPIO_SIFIVE 1
+#define CONFIG_NVMEM_SUNXI_SID 1
+#define CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE 1
+#define CONFIG_TRACE_IRQFLAGS_SUPPORT 1
+#define CONFIG_MFD_SYSCON 1
+#define CONFIG_DETECT_HUNG_TASK 1
+#define CONFIG_PCIE_BUS_DEFAULT 1
+#define CONFIG_CRYPTO_RNG_MODULE 1
+#define CONFIG_DRM_TTM_HELPER_MODULE 1
+#define CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN 8
+#define CONFIG_CRYPTO_LIB_UTILS 1
+#define CONFIG_SND_USB 1
+#define CONFIG_RD_GZIP 1
+#define CONFIG_HAVE_REGS_AND_STACK_ACCESS_API 1
+#define CONFIG_BLK_PM 1
+#define CONFIG_SECURITY_APPARMOR_PARANOID_LOAD 1
+#define CONFIG_MDIO_BUS 1
+#define CONFIG_TREE_RCU 1
+#define CONFIG_ALLOW_DEV_COREDUMP 1
+#define CONFIG_SUNXI_CCU 1
+#define CONFIG_SWIOTLB 1
+#define CONFIG_EXT4_FS_SECURITY 1
+#define CONFIG_GRO_CELLS 1
+#define CONFIG_SECURITY_APPARMOR_HASH_DEFAULT 1
+#define CONFIG_RISCV_INTC 1
+#define CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN 1
+#define CONFIG_ATA_ACPI 1
+#define CONFIG_NET_VENDOR_CORTINA 1
+#define CONFIG_ELFCORE 1
+#define CONFIG_WQ_WATCHDOG 1
+#define CONFIG_BINFMT_ELF 1
+#define CONFIG_AUDIT_GENERIC 1
+#define CONFIG_SOC_MICROCHIP_POLARFIRE 1
+#define CONFIG_SCSI_PROC_FS 1
+#define CONFIG_I2C_HID_MODULE 1
+#define CONFIG_HAVE_PERF_REGS 1
+#define CONFIG_HAVE_KVM_MSI 1
+#define CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 1
+#define CONFIG_NFS_V4_SECURITY_LABEL 1
+#define CONFIG_NET_VENDOR_ALTEON 1
+#define CONFIG_REGULATOR_FIXED_VOLTAGE 1
+#define CONFIG_NET_VENDOR_RENESAS 1
+#define CONFIG_KEYS 1
+#define CONFIG_ERRATA_THEAD_PBMT 1
+#define CONFIG_DEBUG_MUTEXES 1
+#define CONFIG_NETFILTER_XT_MARK_MODULE 1
+#define CONFIG_NETFILTER_XTABLES_MODULE 1
+#define CONFIG_DRM_PANEL_ORIENTATION_QUIRKS_MODULE 1
+#define CONFIG_SOFTLOCKUP_DETECTOR 1
+#define CONFIG_CRYPTO_ECHAINIV_MODULE 1
+#define CONFIG_HAVE_ARCH_AUDITSYSCALL 1
+#define CONFIG_RTC_DRV_GOLDFISH 1
+#define CONFIG_LEGACY_TIOCSTI 1
+#define CONFIG_CRYPTO_USER_API_HASH 1
+#define CONFIG_MTD_CFI_ADV_OPTIONS 1
+#define CONFIG_PM_SLEEP_SMP 1
+#define CONFIG_CRYPTO_HW 1
+#define CONFIG_CLK_ANALOGBITS_WRPLL_CLN28HPC 1
+#define CONFIG_ACPI_AC 1
+#define CONFIG_HARDIRQS_SW_RESEND 1
+#define CONFIG_ARCH_MICROCHIP_POLARFIRE 1
+#define CONFIG_SPI_MASTER 1
+#define CONFIG_IRQ_STACKS 1
+#define CONFIG_VT_HW_CONSOLE_BINDING 1
+#define CONFIG_THERMAL_HWMON 1
+#define CONFIG_CRYPTO_SKCIPHER 1
+#define CONFIG_XZ_DEC_X86 1
+#define CONFIG_SERIAL_OF_PLATFORM 1
+#define CONFIG_SERIAL_8250_DW 1
+#define CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS 0
+#define CONFIG_CONSOLE_LOGLEVEL_QUIET 4
+#define CONFIG_NVME_CORE_MODULE 1
+#define CONFIG_CRC16 1
+#define CONFIG_GENERIC_CALIBRATE_DELAY 1
+#define CONFIG_NET_CLS 1
+#define CONFIG_TMPFS 1
+#define CONFIG_NET_VENDOR_NETERION 1
+#define CONFIG_RANDSTRUCT_NONE 1
+#define CONFIG_USB_OHCI_HCD_PLATFORM 1
+#define CONFIG_FUTEX 1
+#define CONFIG_IP_VS_MH_TAB_INDEX 12
+#define CONFIG_IP_PNP_DHCP 1
+#define CONFIG_GENERIC_PHY_MIPI_DPHY 1
+#define CONFIG_VIRTIO_PCI 1
+#define CONFIG_UNIX_SCM 1
+#define CONFIG_MMC_SPI 1
+#define CONFIG_CONSOLE_LOGLEVEL_DEFAULT 7
+#define CONFIG_REGMAP_I2C_MODULE 1
+#define CONFIG_GENERIC_SCHED_CLOCK 1
+#define CONFIG_NET_VENDOR_REALTEK 1
+#define CONFIG_CRYPTO_USER_API 1
+#define CONFIG_RTC_HCTOSYS 1
+#define CONFIG_SECURITY_NETWORK 1
+#define CONFIG_SERIAL_CORE_CONSOLE 1
+#define CONFIG_HUGETLB_PAGE 1
+#define CONFIG_NET_VENDOR_EMULEX 1
+#define CONFIG_USB_HID 1
+#define CONFIG_LD_ORPHAN_WARN_LEVEL "warn"
+#define CONFIG_SLUB_DEBUG 1
+#define CONFIG_UCS2_STRING 1
+#define CONFIG_SND_SOC_WM8978_MODULE 1
+#define CONFIG_USB_F_RNDIS_MODULE 1
+#define CONFIG_DMADEVICES 1
+#define CONFIG_PAHOLE_VERSION 0
+#define CONFIG_PINCTRL_RENESAS 1
+#define CONFIG_PINCTRL 1
+#define CONFIG_ARCH_SUPPORTS_CFI_CLANG 1
+#define CONFIG_IPV6_NDISC_NODETYPE 1
+#define CONFIG_PCI_LABEL 1
+#define CONFIG_CGROUP_SCHED 1
+#define CONFIG_QUEUED_RWLOCKS 1
+#define CONFIG_SYSVIPC 1
+#define CONFIG_RAID6_PQ_BENCHMARK 1
+#define CONFIG_ARCH_HAS_GIGANTIC_PAGE 1
+#define CONFIG_USB_CONFIGFS_ECM_SUBSET 1
+#define CONFIG_HAVE_DEBUG_KMEMLEAK 1
+#define CONFIG_NF_CONNTRACK_FTP_MODULE 1
+#define CONFIG_PAGE_SIZE_LESS_THAN_64KB 1
+#define CONFIG_USB_MUSB_SUNXI_MODULE 1
+#define CONFIG_MODULES 1
+#define CONFIG_RPMSG 1
+#define CONFIG_DEBUG_PER_CPU_MAPS 1
+#define CONFIG_USB_GADGET 1
+#define CONFIG_CONTEXT_TRACKING 1
+#define CONFIG_HAVE_LD_DEAD_CODE_DATA_ELIMINATION 1
+#define CONFIG_MQ_IOSCHED_DEADLINE 1
+#define CONFIG_GENERIC_IRQ_IPI 1
+#define CONFIG_DUMMY_CONSOLE_COLUMNS 80
+#define CONFIG_XXHASH 1
+#define CONFIG_SOUND 1
+#define CONFIG_JOLIET 1
+#define CONFIG_ARCH_SUNXI 1
+#define CONFIG_CPU_IDLE_MULTIPLE_DRIVERS 1
+#define CONFIG_PINCTRL_SUN20I_D1 1
+#define CONFIG_PROC_CHILDREN 1
+#define CONFIG_UNIX 1
+#define CONFIG_USB_NET_DRIVERS 1
+#define CONFIG_CC_CAN_LINK 1
+#define CONFIG_LD_IS_BFD 1
+#define CONFIG_NO_HZ_COMMON 1
+#define CONFIG_DRM_MIPI_DSI 1
+#define CONFIG_HAVE_CLK 1
+#define CONFIG_CRYPTO_HASH2 1
+#define CONFIG_NET_VENDOR_VERTEXCOM 1
+#define CONFIG_THERMAL_GOV_STEP_WISE 1
+#define CONFIG_DEFAULT_HOSTNAME "(none)"
+#define CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS 2
+#define CONFIG_CC_HAS_NO_PROFILE_FN_ATTR 1
+#define CONFIG_CPU_FREQ_GOV_POWERSAVE_MODULE 1
+#define CONFIG_NFS_FS 1
+#define CONFIG_SUNXI_SRAM 1
+#define CONFIG_SUN8I_DE2_CCU_MODULE 1
+#define CONFIG_MEMBARRIER 1
+#define CONFIG_XPS 1
+#define CONFIG_INET_ESP_MODULE 1
+#define CONFIG_SECURITY_SELINUX_DEVELOP 1
+#define CONFIG_SGL_ALLOC 1
+#define CONFIG_LZ4_DECOMPRESS 1
+#define CONFIG_FONT_SUPPORT 1
+#define CONFIG_ADVISE_SYSCALLS 1
+#define CONFIG_MD 1
+#define CONFIG_CRYPTO_ALGAPI 1
+#define CONFIG_GENERIC_IRQ_SHOW_LEVEL 1
+#define CONFIG_HOTPLUG_CORE_SYNC 1
+#define CONFIG_NET_VENDOR_WIZNET 1
+#define CONFIG_BRIDGE_MODULE 1
+#define CONFIG_SCHED_MM_CID 1
+#define CONFIG_RD_BZIP2 1
+#define CONFIG_SKB_EXTENSIONS 1
+#define CONFIG_PM_OPP 1
+#define CONFIG_GPIO_CDEV 1
+#define CONFIG_CC_VERSION_TEXT "riscv64-unknown-elf-gcc () 13.2.0"
+#define CONFIG_KEYBOARD_ATKBD 1
+#define CONFIG_LIBNVDIMM 1
+#define CONFIG_NET_IP_TUNNEL 1
+#define CONFIG_MTD_CFI_I1 1
+#define CONFIG_NF_NAT_MODULE 1
+#define CONFIG_BLOCK_LEGACY_AUTOLOAD 1
+#define CONFIG_NET_VENDOR_OKI 1
+#define CONFIG_CPU_IDLE 1
+#define CONFIG_WLAN_VENDOR_INTERSIL 1
+#define CONFIG_NFS_COMMON 1
+#define CONFIG_REGULATOR 1
+#define CONFIG_FAIR_GROUP_SCHED 1
+#define CONFIG_CRYPTO_HASH 1
+#define CONFIG_DRM_SUN8I_MIXER_MODULE 1
+#define CONFIG_EFI_PARTITION 1
+#define CONFIG_GOLDFISH 1
+#define CONFIG_LOG_BUF_SHIFT 17
+#define CONFIG_WLAN_VENDOR_ATH 1
+#define CONFIG_EXTRA_FIRMWARE ""
+#define CONFIG_RUNTIME_KERNEL_TESTING_MENU 1
+#define CONFIG_NET_VENDOR_8390 1
+#define CONFIG_ACPI_VIDEO_MODULE 1
+#define CONFIG_HAVE_KCSAN_COMPILER 1
+#define CONFIG_VFAT_FS 1
+#define CONFIG_ARCH_SUPPORTS_ACPI 1
+#define CONFIG_PID_NS 1
+#define CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE 1
+#define CONFIG_CRC32_SLICEBY8 1
+#define CONFIG_USB_LIBCOMPOSITE_MODULE 1
+#define CONFIG_EFI_PARAMS_FROM_FDT 1
+#define CONFIG_BLK_DEV_SR 1
+#define CONFIG_CPU_RMAP 1
+#define CONFIG_BLK_DEV_LOOP 1
+#define CONFIG_DEFAULT_HUNG_TASK_TIMEOUT 120
+#define CONFIG_VIRTIO_PCI_LEGACY 1
+#define CONFIG_SPI_MEM 1
+#define CONFIG_E1000E 1
+#define CONFIG_INPUT_VIVALDIFMAP 1
+#define CONFIG_MULTIUSER 1
+#define CONFIG_DMA_OF 1
+#define CONFIG_SUSPEND 1
+#define CONFIG_GENERIC_VDSO_TIME_NS 1
+#define CONFIG_CROSS_MEMORY_ATTACH 1
+#define CONFIG_CRYPTO_CBC_MODULE 1
+#define CONFIG_RENESAS_OSTM 1
+#define CONFIG_PINCTRL_SUNXI 1
+#define CONFIG_SUN50I_IOMMU 1
+#define CONFIG_SERIAL_8250_RUNTIME_UARTS 4
+#define CONFIG_CLANG_VERSION 0
+#define CONFIG_FS_MBCACHE 1
+#define CONFIG_RTC_CLASS 1
+#define CONFIG_CRC7 1
+#define CONFIG_CRYPTO_RNG_DEFAULT_MODULE 1
+#define CONFIG_TMPFS_XATTR 1
+#define CONFIG_EXT4_USE_FOR_EXT2 1
+#define CONFIG_USB_RENESAS_USBHS_MODULE 1
+#define CONFIG_ARM_AMBA 1
+#define CONFIG_CPU_PM 1
+#define CONFIG_TIMER_OF 1
+#define CONFIG_ARCH_HAS_DMA_PREP_COHERENT 1
+#define CONFIG_HAVE_FUNCTION_TRACER 1
+#define CONFIG_GENERIC_PHY 1
+#define CONFIG_CPU_ISOLATION 1
+#define CONFIG_NF_NAT_TFTP_MODULE 1
+#define CONFIG_MTD_SPI_NOR 1
+#define CONFIG_DRM_DISPLAY_DP_HELPER 1
+#define CONFIG_ARCH_SELECT_MEMORY_MODEL 1
+#define CONFIG_NETFILTER_XT_TARGET_REDIRECT_MODULE 1
+#define CONFIG_CRYPTO_MANAGER2 1
+#define CONFIG_SERIAL_8250_DEPRECATED_OPTIONS 1
+#define CONFIG_USB_GADGET_VBUS_DRAW 2
+#define CONFIG_ARCH_HAS_PTE_SPECIAL 1
+#define CONFIG_NET_VENDOR_MYRI 1
+#define CONFIG_NF_NAT_MASQUERADE 1
+#define CONFIG_PM_GENERIC_DOMAINS_OF 1
+#define CONFIG_DEBUG_VM_PGTABLE 1
+#define CONFIG_CLZ_TAB 1
+#define CONFIG_GENERIC_PCI_IOMAP 1
+#define CONFIG_SLUB 1
+#define CONFIG_CONFIGFS_FS_MODULE 1
+#define CONFIG_XZ_DEC_BCJ 1
+#define CONFIG_PM_SLEEP 1
+#define CONFIG_I2C_MODULE 1
+#define CONFIG_DEBUG_VM 1
+#define CONFIG_RISCV_ISA_SVPBMT 1
+#define CONFIG_MMC_SDHI_INTERNAL_DMAC 1
+#define CONFIG_BINFMT_SCRIPT 1
+#define CONFIG_EFI_STUB 1
+#define CONFIG_MOUSE_PS2_CYPRESS 1
+#define CONFIG_FRAME_POINTER 1
+#define CONFIG_MOUSE_PS2_LOGIPS2PP 1
+#define CONFIG_TICK_CPU_ACCOUNTING 1
+#define CONFIG_VM_EVENT_COUNTERS 1
+#define CONFIG_SCHED_STACK_END_CHECK 1
+#define CONFIG_RESET_SUNXI 1
+#define CONFIG_CRYPTO_ECB 1
+#define CONFIG_WLAN_VENDOR_BROADCOM 1
+#define CONFIG_DEBUG_FS 1
+#define CONFIG_NET_VENDOR_AMD 1
+#define CONFIG_DRM_TTM_MODULE 1
+#define CONFIG_BASE_FULL 1
+#define CONFIG_FB_CFB_IMAGEBLIT 1
+#define CONFIG_ZLIB_DEFLATE_MODULE 1
+#define CONFIG_SUNRPC 1
+#define CONFIG_RPMSG_NS 1
+#define CONFIG_RENESAS_RZG2LWDT 1
+#define CONFIG_CACHESTAT_SYSCALL 1
+#define CONFIG_RSEQ 1
+#define CONFIG_FW_LOADER 1
+#define CONFIG_KALLSYMS 1
+#define CONFIG_COMMON_CLK 1
+#define CONFIG_STACKPROTECTOR_STRONG 1
+#define CONFIG_PCI 1
+#define CONFIG_RTC_HCTOSYS_DEVICE "rtc0"
+#define CONFIG_NET_VENDOR_FUNGIBLE 1
+#define CONFIG_NET_VENDOR_ASIX 1
+#define CONFIG_DECOMPRESS_XZ 1
+#define CONFIG_PCI_QUIRKS 1
+#define CONFIG_MII 1
+#define CONFIG_MD_BITMAP_FILE 1
+#define CONFIG_SIGNALFD 1
+#define CONFIG_NET_CORE 1
+#define CONFIG_MOUSE_PS2_ALPS 1
+#define CONFIG_EXT4_FS 1
+#define CONFIG_MEMORY_BALLOON 1
+#define CONFIG_UNINLINE_SPIN_UNLOCK 1
+#define CONFIG_SND_JACK_INPUT_DEV 1
+#define CONFIG_CRYPTO_SHA1 1
+#define CONFIG_SATA_PMP 1
+#define CONFIG_XZ_DEC 1
+#define CONFIG_NET_VENDOR_TI 1
+#define CONFIG_LOCKD_V4 1
+#define CONFIG_DUMMY_MODULE 1
+#define CONFIG_NET_VENDOR_ALACRITECH 1
+#define CONFIG_CRYPTO_DEV_ALLWINNER 1
+#define CONFIG_WATCHDOG 1
+#define CONFIG_TUNE_GENERIC 1
+#define CONFIG_HAS_IOMEM 1
+#define CONFIG_ACPI_GENERIC_GSI 1
+#define CONFIG_NF_LOG_IPV6_MODULE 1
+#define CONFIG_PINCTRL_STARFIVE_JH7100 1
+#define CONFIG_CRYPTO_RSA 1
+#define CONFIG_DMA_DIRECT_REMAP 1
+#define CONFIG_CRYPTO_SIG2 1
+#define CONFIG_PWRSEQ_EMMC 1
+#define CONFIG_HAVE_MOVE_PUD 1
+#define CONFIG_VIDEOMODE_HELPERS 1
+#define CONFIG_CRYPTO_ACOMP2 1
+#define CONFIG_ARCH_HAS_SETUP_DMA_OPS 1
+#define CONFIG_HAVE_ARCH_KASAN_VMALLOC 1
+#define CONFIG_ARCH_SIFIVE 1
+#define CONFIG_MTD_MAP_BANK_WIDTH_1 1
+#define CONFIG_SCHED_HRTICK 1
+#define CONFIG_RESET_STARFIVE_JH7100 1
+#define CONFIG_EPOLL 1
+#define CONFIG_GENERIC_IRQ_MULTI_HANDLER 1
+#define CONFIG_SND_PCM 1
+#define CONFIG_SATA_MOBILE_LPM_POLICY 0
+#define CONFIG_SUN20I_D1_CCU 1
+#define CONFIG_RISCV_ISA_V_DEFAULT_ENABLE 1
+#define CONFIG_FAILOVER 1
+#define CONFIG_RISCV_ISA_ZBB 1
+#define CONFIG_CGROUP_HUGETLB 1
+#define CONFIG_USB_U_ETHER_MODULE 1
+#define CONFIG_ERRATA_THEAD_CMO 1
+#define CONFIG_RISCV_DMA_NONCOHERENT 1
+#define CONFIG_GENERIC_PTDUMP 1
+#define CONFIG_NET 1
+#define CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE 1
+#define CONFIG_USB_OHCI_HCD_PCI 1
+#define CONFIG_SND_JACK 1
+#define CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN 1
+#define CONFIG_SECURITY_PATH 1
+#define CONFIG_VIRTIO_DMA_SHARED_BUFFER_MODULE 1
+#define CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC 1
+#define CONFIG_TOOLCHAIN_HAS_V 1
+#define CONFIG_NETFILTER_XT_MATCH_CONNTRACK_MODULE 1
+#define CONFIG_RISCV_ALTERNATIVE 1
+#define CONFIG_PWRSEQ_SIMPLE 1
+#define CONFIG_PINMUX 1
+#define CONFIG_MTD_GEN_PROBE 1
+#define CONFIG_IRQ_DOMAIN_HIERARCHY 1
+#define CONFIG_ATA_FORCE 1
+#define CONFIG_NETFILTER_BPF_LINK 1
+#define CONFIG_MPILIB 1
+#define CONFIG_PACKET 1
+#define CONFIG_XFRM_ALGO_MODULE 1
+#define CONFIG_SND_SIMPLE_CARD_UTILS_MODULE 1
+#define CONFIG_BLK_ICQ 1
+#define CONFIG_HAVE_CLK_PREPARE 1
+#define CONFIG_CRYPTO_AKCIPHER2 1
+#define CONFIG_FB_IOMEM_FOPS 1
+#define CONFIG_SND_CTL_FAST_LOOKUP 1
+#define CONFIG_BTRFS_FS_POSIX_ACL 1
+#define CONFIG_CLK_STARFIVE_JH7100 1
+#define CONFIG_CRYPTO_JITTERENTROPY_OSR 1
+#define CONFIG_NET_VENDOR_ALLWINNER 1
+#define CONFIG_DUMMY_CONSOLE_ROWS 25
+#define CONFIG_USB_XHCI_PLATFORM 1
+#define CONFIG_NF_CONNTRACK_TFTP_MODULE 1
+#define CONFIG_NFS_V3 1
+#define CONFIG_RISCV_ALTERNATIVE_EARLY 1
+#define CONFIG_HAVE_KVM_IRQFD 1
+#define CONFIG_INET 1
+#define CONFIG_XZ_DEC_POWERPC 1
+#define CONFIG_IP_PNP_BOOTP 1
+#define CONFIG_USB_MUSB_HDRC_MODULE 1
+#define CONFIG_VIRTIO_NET 1
+#define CONFIG_NETFILTER_XT_MATCH_ADDRTYPE_MODULE 1
+#define CONFIG_NET_VENDOR_HUAWEI 1
+#define CONFIG_PREVENT_FIRMWARE_BUILD 1
+#define CONFIG_SERIAL_8250_PNP 1
+#define CONFIG_DRM_DW_HDMI_MODULE 1
+#define CONFIG_FREEZER 1
+#define CONFIG_USB_F_SUBSET_MODULE 1
+#define CONFIG_PCI_DOMAINS 1
+#define CONFIG_NET_VENDOR_CHELSIO 1
+#define CONFIG_EFIVAR_FS_MODULE 1
+#define CONFIG_HAVE_ARCH_VMAP_STACK 1
+#define CONFIG_NETFILTER_XT_MATCH_IPVS_MODULE 1
+#define CONFIG_RISCV_MISALIGNED 1
+#define CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN "kernel.org"
+#define CONFIG_USB_F_ACM_MODULE 1
+#define CONFIG_RTC_LIB 1
+#define CONFIG_SUN20I_D1_R_CCU 1
+#define CONFIG_RISCV_ISA_SVNAPOT 1
+#define CONFIG_HAVE_KPROBES 1
+#define CONFIG_CRYPTO_AES_MODULE 1
+#define CONFIG_HAVE_GENERIC_VDSO 1
+#define CONFIG_THREAD_SIZE_ORDER 2
+#define CONFIG_GPIOLIB 1
+#define CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT 1
+#define CONFIG_FUTEX_PI 1
+#define CONFIG_MMC_DW 1
+#define CONFIG_DM_BIO_PRISON_MODULE 1
+#define CONFIG_AUTOFS_FS 1
+#define CONFIG_ISO9660_FS 1
+#define CONFIG_NETFILTER_XT_NAT_MODULE 1
+#define CONFIG_DMA_NONCOHERENT_MMAP 1
+#define CONFIG_CLKSRC_MMIO 1
+#define CONFIG_MTD_CFI_NOSWAP 1
+#define CONFIG_NET_VENDOR_AQUANTIA 1
+#define CONFIG_SCSI_VIRTIO 1
+#define CONFIG_HVC_DRIVER 1
+#define CONFIG_NETFILTER 1
+#define CONFIG_HAVE_ARCH_KASAN 1
+#define CONFIG_NET_VENDOR_SMSC 1
+#define CONFIG_GENERIC_ARCH_TOPOLOGY 1
+#define CONFIG_NFS_DISABLE_UDP_SUPPORT 1
+#define CONFIG_SERIO_SERPORT 1
+#define CONFIG_CLONE_BACKWARDS 1
+#define CONFIG_RD_XZ 1
+#define CONFIG_HAVE_PREEMPT_DYNAMIC_KEY 1
+#define CONFIG_AUXILIARY_BUS 1
+#define CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS 1
+#define CONFIG_ATA_VERBOSE_ERROR 1
+#define CONFIG_SND_DRIVERS 1
+#define CONFIG_NET_FLOW_LIMIT 1
+#define CONFIG_LOCKDEP_SUPPORT 1
+#define CONFIG_RISCV_PMU 1
+#define CONFIG_ARCH_WANT_HUGE_PMD_SHARE 1
+#define CONFIG_DEBUG_ATOMIC_SLEEP 1
+#define CONFIG_POSIX_MQUEUE 1
+#define CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS 1
+#define CONFIG_NOUVEAU_DEBUG 5
+#define CONFIG_NETFILTER_INGRESS 1
+#define CONFIG_CRYPTO_LIB_GF128MUL_MODULE 1
+#define CONFIG_PCIE_FU740 1
+#define CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE 256
+#define CONFIG_CPU_FREQ_STAT 1
+#define CONFIG_NET_XGRESS 1
+#define CONFIG_GENERIC_STRNCPY_FROM_USER 1
+#define CONFIG_MTD_BLKDEVS 1
+#define CONFIG_HAVE_RSEQ 1
+#define CONFIG_OF_KOBJ 1
+#define CONFIG_CONTEXT_TRACKING_IDLE 1
+#define CONFIG_DEBUG_SPINLOCK 1
+#define CONFIG_NET_VENDOR_DEC 1
+#define CONFIG_AS_HAS_ULEB128 1
+#define CONFIG_ACPI_BUTTON 1
+#define CONFIG_GENERIC_GETTIMEOFDAY 1
+#define CONFIG_PCS_XPCS_MODULE 1
+#define CONFIG_ARCH_USE_MEMTEST 1
+#define CONFIG_DRM_SUN6I_DSI_MODULE 1
+#define CONFIG_NET_VENDOR_PENSANDO 1
+#define CONFIG_IP6_NF_FILTER_MODULE 1
+#define CONFIG_INPUT_MOUSEDEV_SCREEN_X 1024
+#define CONFIG_EFI_ESRT 1
+#define CONFIG_NEED_DMA_MAP_STATE 1
+#define CONFIG_IIO 1
+#define CONFIG_SND_HDA_PREALLOC_SIZE 64
+#define CONFIG_SERIO_LIBPS2 1
+#define CONFIG_IP_VS_PROTO_TCP 1
+#define CONFIG_SERIAL_SIFIVE_CONSOLE 1
+#define CONFIG_SOC_VIRT 1
+#define CONFIG_PAGE_OFFSET 0xff60000000000000
+#define CONFIG_FONT_8x8 1
+#define CONFIG_NET_VENDOR_ATHEROS 1
+#define CONFIG_CLK_STARFIVE_JH7110_ISP_MODULE 1
+#define CONFIG_XOR_BLOCKS_MODULE 1
+#define CONFIG_TIME_NS 1
+#define CONFIG_NET_VENDOR_SUN 1
+#define CONFIG_PANIC_TIMEOUT 0
+#define CONFIG_PM_GENERIC_DOMAINS_SLEEP 1
+#define CONFIG_HAVE_ARCH_SECCOMP 1
+#define CONFIG_STACKDEPOT 1
+#define CONFIG_NET_VENDOR_XILINX 1
+#define CONFIG_DECOMPRESS_LZ4 1
+#define CONFIG_PREEMPT_NONE 1
+#define CONFIG_SPARSEMEM_MANUAL 1
+#define CONFIG_ERRATA_SIFIVE_CIP_453 1
+#define CONFIG_BPF_SYSCALL 1
+#define CONFIG_SMP 1
+#define CONFIG_NET_VENDOR_CADENCE 1
+#define CONFIG_NET_VENDOR_MICROSOFT 1
+#define CONFIG_TTY 1
+#define CONFIG_IP_VS_MODULE 1
+#define CONFIG_NET_VENDOR_I825XX 1
+#define CONFIG_PNP 1
+#define CONFIG_RCU_EXP_CPU_STALL_TIMEOUT 0
+#define CONFIG_GENERIC_ALLOCATOR 1
+#define CONFIG_MMC_SDHCI_IO_ACCESSORS 1
+#define CONFIG_FB_SYSMEM_HELPERS_DEFERRED 1
+#define CONFIG_LIBCRC32C_MODULE 1
+#define CONFIG_GENERIC_BUG 1
+#define CONFIG_CRYPTO_SHA256_MODULE 1
+#define CONFIG_HAVE_FTRACE_MCOUNT_RECORD 1
+#define CONFIG_BRIDGE_VLAN_FILTERING 1
+#define CONFIG_POSIX_TIMERS 1
+#define CONFIG_INET_TCP_DIAG 1
+#define CONFIG_HOTPLUG_CORE_SYNC_DEAD 1
+#define CONFIG_HW_CONSOLE 1
+#define CONFIG_MDIO_BITBANG 1
+#define CONFIG_HAVE_KVM_IRQ_ROUTING 1
+#define CONFIG_DWMAC_SUNXI_MODULE 1
+#define CONFIG_DEVMEM 1
+#define CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY 1
+#define CONFIG_MOUSE_PS2_FOCALTECH 1
+#define CONFIG_CRYPTO_LIB_SHA1 1
+#define CONFIG_SND_SPI 1
+#define CONFIG_SOC_STARFIVE 1
+#define CONFIG_LIST_HARDENED 1
+#define CONFIG_DM_THIN_PROVISIONING_MODULE 1
+#define CONFIG_KEYBOARD_SUN4I_LRADC_MODULE 1
+#define CONFIG_PREEMPT_NONE_BUILD 1
+#define CONFIG_RTC_NVMEM 1
+#define CONFIG_ZSTD_COMMON 1
+#define CONFIG_CC_HAS_KASAN_GENERIC 1
+#define CONFIG_DRM_SUN8I_DW_HDMI_MODULE 1
+#define CONFIG_POWER_RESET_SYSCON_POWEROFF 1
+#define CONFIG_DEBUG_KERNEL 1
+#define CONFIG_AS_HAS_OPTION_ARCH 1
+#define CONFIG_COMPAT_BRK 1
+#define CONFIG_CLK_STARFIVE_JH7110_SYS 1
+#define CONFIG_LOCALVERSION ""
+#define CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU 1
+#define CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 1
+#define CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK 1
+#define CONFIG_MEMTEST 1
+#define CONFIG_CMODEL_MEDANY 1
+#define CONFIG_CAN_RAW_MODULE 1
+#define CONFIG_SYMBOLIC_ERRNAME 1
+#define CONFIG_CRYPTO 1
+#define CONFIG_SCHED_DEBUG 1
+#define CONFIG_NET_VENDOR_BROCADE 1
+#define CONFIG_BTRFS_FS_MODULE 1
+#define CONFIG_DEFAULT_MMAP_MIN_ADDR 4096
+#define CONFIG_IP_NF_IPTABLES_MODULE 1
+#define CONFIG_CMDLINE ""
+#define CONFIG_NET_VENDOR_QLOGIC 1
+#define CONFIG_USB_XHCI_HCD 1
+#define CONFIG_VIRTIO 1
+#define CONFIG_SERIAL_SIFIVE 1
+#define CONFIG_CFS_BANDWIDTH 1
+#define CONFIG_NET_SELFTESTS 1
+#define CONFIG_ARCH_RENESAS 1
+#define CONFIG_DMA_VIRTUAL_CHANNELS_MODULE 1
+#define CONFIG_USB_ARCH_HAS_HCD 1
+#define CONFIG_GENERIC_IRQ_SHOW 1
+#define CONFIG_I2C_MV64XXX_MODULE 1
+#define CONFIG_NVMEM_SYSFS 1
+#define CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE 1
+#define CONFIG_ARCH_HAS_ELF_RANDOMIZE 1
+#define CONFIG_9P_FS 1
+#define CONFIG_NETFS_SUPPORT 1
+#define CONFIG_HAVE_FUNCTION_ARG_ACCESS_API 1
+#define CONFIG_PANIC_ON_OOPS_VALUE 0
+#define CONFIG_NET_VENDOR_SAMSUNG 1
+#define CONFIG_NET_VENDOR_ADI 1
+#define CONFIG_INITRAMFS_PRESERVE_MTIME 1
+#define CONFIG_SCSI_MOD 1
+#define CONFIG_NET_VENDOR_MICREL 1
+#define CONFIG_CRYPTO_CRC32C 1
+#define CONFIG_SERIAL_CORE 1
+#define CONFIG_USB_CONFIGFS_SERIAL 1
+#define CONFIG_HAVE_KRETPROBES 1
+#define CONFIG_ASSOCIATIVE_ARRAY 1
+#define CONFIG_NF_DEFRAG_IPV6_MODULE 1
+#define CONFIG_MICREL_PHY 1
+#define CONFIG_MODULE_COMPRESS_NONE 1
+#define CONFIG_CC_HAS_ZERO_CALL_USED_REGS 1
+#define CONFIG_NFS_V4 1
+#define CONFIG_RCU_NEED_SEGCBLIST 1
+#define CONFIG_HAS_DMA 1
+#define CONFIG_NF_CT_PROTO_SCTP 1
+#define CONFIG_SCSI 1
+#define CONFIG_FB_CFB_FILLRECT 1
+#define CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST 1
+#define CONFIG_HID 1
+#define CONFIG_DMA_DECLARE_COHERENT 1
+#define CONFIG_CGROUP_NET_PRIO 1
+#define CONFIG_FONT_8x16 1
+#define CONFIG_NET_VENDOR_MELLANOX 1
+#define CONFIG_VT_CONSOLE_SLEEP 1
+#define CONFIG_RCU_EQS_DEBUG 1
+#define CONFIG_ARCH_HAS_CURRENT_STACK_POINTER 1
+#define CONFIG_JBD2 1
+#define CONFIG_SPARSEMEM_VMEMMAP 1
+#define CONFIG_MEMCG_KMEM 1
+#define CONFIG_NET_VENDOR_MARVELL 1
+#define CONFIG_PHYLIB 1
+#define CONFIG_REGULATOR_GPIO 1
+#define CONFIG_NET_VENDOR_NVIDIA 1
+#define CONFIG_ARCH_RV64I 1
+#define CONFIG_IRQ_DOMAIN 1
+#define CONFIG_RISCV 1
+#define CONFIG_VIRTIO_BALLOON 1
+#define CONFIG_LSM_MMAP_MIN_ADDR 65536
+#define CONFIG_LOCALVERSION_AUTO 1
+#define CONFIG_INTEGRITY_AUDIT 1
+#define CONFIG_ARCH_HAS_DEBUG_VIRTUAL 1
+#define CONFIG_CLK_STARFIVE_JH7100_AUDIO_MODULE 1
+#define CONFIG_HAVE_ASM_MODVERSIONS 1
+#define CONFIG_IPC_NS 1
+#define CONFIG_MISC_FILESYSTEMS 1
+#define CONFIG_ARCH_MMAP_RND_BITS_MIN 18
+#define CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY 1
+#define CONFIG_DECOMPRESS_BZIP2 1
+#define CONFIG_PER_VMA_LOCK 1
+#define CONFIG_ARCH_SUPPORTS_UPROBES 1
+#define CONFIG_NET_VENDOR_STMICRO 1
+#define CONFIG_XZ_DEC_SPARC 1
+#define CONFIG_SECURITY_APPARMOR_EXPORT_BINARY 1
+#define CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE 1
+#define CONFIG_OF_GPIO 1
+#define CONFIG_ARCH_SUPPORTS_HUGETLBFS 1
+#define CONFIG_SERIAL_MCTRL_GPIO 1
+#define CONFIG_REALTEK_PHY 1
+#define CONFIG_DST_CACHE 1
+#define CONFIG_KVM_GENERIC_HARDWARE_ENABLING 1
+#define CONFIG_NF_REJECT_IPV6_MODULE 1
+#define CONFIG_RCU_CPU_STALL_TIMEOUT 21
+#define CONFIG_POSIX_CPU_TIMERS_TASK_WORK 1
+#define CONFIG_CHECKPOINT_RESTORE 1
+#define CONFIG_SND_VERBOSE_PROCFS 1
+#define CONFIG_LLD_VERSION 0
+#define CONFIG_SECTION_MISMATCH_WARN_ONLY 1
+#define CONFIG_NETFILTER_EGRESS 1
+#define CONFIG_MDIO_DEVICE 1
+#define CONFIG_TIMER_PROBE 1
+#define CONFIG_MODPROBE_PATH "/sbin/modprobe"
+#define CONFIG_POWER_RESET 1
+#define CONFIG_DRM_DISPLAY_HELPER_MODULE 1
+#define CONFIG_USB_RENESAS_USBHS_UDC_MODULE 1
+#define CONFIG_IP6_NF_MATCH_IPV6HEADER_MODULE 1
+#define CONFIG_MACVLAN_MODULE 1
+#define CONFIG_PCIEASPM_DEFAULT 1
+#define CONFIG_PROFILING 1
+#define CONFIG_INTERVAL_TREE 1
+#define CONFIG_MMC_DW_PLTFM 1
+#define CONFIG_NET_VENDOR_AMAZON 1
+#define CONFIG_SPARSEMEM 1
+#define CONFIG_BLK_MQ_STACKING 1
+#define CONFIG_DRM_GEM_SHMEM_HELPER_MODULE 1
+#define CONFIG_WLAN_VENDOR_ATMEL 1
+#define CONFIG_GRACE_PERIOD 1
+#define CONFIG_NET_VENDOR_TEHUTI 1
+#define CONFIG_CRYPTO_MANAGER 1
+#define CONFIG_EDAC_SUPPORT 1
+#define CONFIG_RT_MUTEXES 1
+#define CONFIG_LOCK_SPIN_ON_OWNER 1
+#define CONFIG_CC_NO_ARRAY_BOUNDS 1
+#define CONFIG_DRM_I2C_SIL164_MODULE 1
+#define CONFIG_HUGETLBFS 1
+#define CONFIG_SLAB_MERGE_DEFAULT 1
+#define CONFIG_KERNFS 1
+#define CONFIG_I2C_ALGOBIT_MODULE 1
+#define CONFIG_MMC_BLOCK 1
+#define CONFIG_ACPI_REDUCED_HARDWARE_ONLY 1
+#define CONFIG_KVM_MODULE 1
+#define CONFIG_PAGE_COUNTER 1
+#define CONFIG_IOMMU_DEFAULT_DMA_STRICT 1
+#define CONFIG_CLK_RENESAS 1
+#define CONFIG_SND_SIMPLE_CARD_MODULE 1
+#define CONFIG_SND_PCI 1
+#define CONFIG_EXPERT 1
+#define CONFIG_WIRELESS 1
+#define CONFIG_RPMSG_CTRL 1
+#define CONFIG_ARCH_SOPHGO 1
+#define CONFIG_HZ_250 1
+#define CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS 64
+#define CONFIG_ARCH_HAS_STRICT_KERNEL_RWX 1
+#define CONFIG_LOCK_DEBUGGING_SUPPORT 1
+#define CONFIG_NF_LOG_SYSLOG_MODULE 1
+#define CONFIG_DMA_SUN6I_MODULE 1
+#define CONFIG_FAT_DEFAULT_IOCHARSET "iso8859-1"
+#define CONFIG_USB_CONFIGFS_MASS_STORAGE 1
+#define CONFIG_RISCV_ISA_ZICBOZ 1
+#define CONFIG_FRAME_WARN 2048
+#define CONFIG_NET_VENDOR_AGERE 1
+#define CONFIG_HID_GENERIC 1
+#define CONFIG_ARCH_MMAP_RND_BITS 18
+#define CONFIG_GENERIC_HWEIGHT 1
+#define CONFIG_INITRAMFS_SOURCE ""
+#define CONFIG_TASKS_TRACE_RCU 1
+#define CONFIG_CGROUPS 1
+#define CONFIG_MMC 1
+#define CONFIG_LZO_COMPRESS_MODULE 1
+#define CONFIG_DAX 1
+#define CONFIG_VIRTIO_INPUT 1
+#define CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS 1
+#define CONFIG_CRYPTO_SEQIV_MODULE 1
+#define CONFIG_DRM_VIRTIO_GPU_KMS 1
+#define CONFIG_HAVE_GCC_PLUGINS 1
+#define CONFIG_STACKTRACE 1
+#define CONFIG_HAVE_PCI 1
+#define CONFIG_EXTCON 1
+#define CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC 1
+#define CONFIG_HAS_IOPORT 1
+#define CONFIG_CRYPTO_DRBG_MODULE 1
+#define CONFIG_OF_EARLY_FLATTREE 1
+#define CONFIG_WLAN_VENDOR_ADMTEK 1
+#define CONFIG_CGROUP_CPUACCT 1
+#define CONFIG_CAN_BCM_MODULE 1
+#define CONFIG_HAS_IOPORT_MAP 1
+#define CONFIG_VIRTIO_PCI_LIB 1
+#define CONFIG_NET_VENDOR_VIA 1
+#define CONFIG_SERIAL_SH_SCI_CONSOLE 1
+#define CONFIG_HZ 250
+#define CONFIG_I2C_HELPER_AUTO 1
+#define CONFIG_PINCTRL_STARFIVE_JH7110_SYS 1
+#define CONFIG_KVM_XFER_TO_GUEST_WORK 1
+#define CONFIG_SERIAL_8250_PERICOM 1
+#define CONFIG_SIFIVE_PLIC 1
+#define CONFIG_SERIAL_8250_NR_UARTS 4
+#define CONFIG_ARCH_HAS_STRICT_MODULE_RWX 1
+#define CONFIG_CC_IS_GCC 1
+#define CONFIG_NET_EGRESS 1
+#define CONFIG_NET_VENDOR_ARC 1
+#define CONFIG_CRYPTO_ENGINE 1
+#define CONFIG_DRM_I2C_CH7006_MODULE 1
+#define CONFIG_HAVE_PERF_USER_STACK_DUMP 1
+#define CONFIG_CGROUP_PERF 1
+#define CONFIG_NLATTR 1
+#define CONFIG_TCP_CONG_CUBIC 1
+#define CONFIG_NR_CPUS 64
+#define CONFIG_SUSPEND_FREEZER 1
+#define CONFIG_MMC_SDHCI 1
+#define CONFIG_SND_SUPPORT_OLD_API 1
+#define CONFIG_MOUSE_PS2_TRACKPOINT 1
+#define CONFIG_DRM_NOUVEAU_BACKLIGHT 1
+#define CONFIG_SYSFS 1
+#define CONFIG_USB_DEFAULT_PERSIST 1
+#define CONFIG_AS_HAS_NON_CONST_LEB128 1
+#define CONFIG_DRM_PANEL_BRIDGE 1
+#define CONFIG_USB_EHCI_HCD_PLATFORM 1
+#define CONFIG_BLK_DEV_BSG_COMMON 1
+#define CONFIG_ASN1 1
+#define CONFIG_CLK_SIFIVE_PRCI 1
+#define CONFIG_CRYPTO_DEV_VIRTIO 1
+#define CONFIG_SPI_RSPI_MODULE 1
+#define CONFIG_XZ_DEC_ARM 1
+#define CONFIG_USB_CONFIGFS_EEM 1
+#define CONFIG_PTP_1588_CLOCK_OPTIONAL 1
+#define CONFIG_FB_DMAMEM_HELPERS 1
+#define CONFIG_FB_SYS_FOPS 1
+#define CONFIG_HAVE_SYSCALL_TRACEPOINTS 1
+#define CONFIG_HAVE_ARCH_HUGE_VMALLOC 1
+#define CONFIG_SERIAL_SH_SCI_DMA 1
+#define CONFIG_ACPI_BATTERY 1
+#define CONFIG_IO_WQ 1
+#define CONFIG_DECOMPRESS_ZSTD 1
+#define CONFIG_FB 1
+#define CONFIG_BLK_MQ_VIRTIO 1
+#define CONFIG_I2C_COMPAT 1
+#define CONFIG_DRM_SCHED_MODULE 1
+#define CONFIG_WLAN_VENDOR_ZYDAS 1
+#define CONFIG_SPARSEMEM_VMEMMAP_ENABLE 1
+#define CONFIG_DRM_SUN4I_MODULE 1
+#define CONFIG_IPVLAN_MODULE 1
+#define CONFIG_VIRTUALIZATION 1
+#define CONFIG_ND_CLAIM 1
+#define CONFIG_MSDOS_PARTITION 1
+#define CONFIG_RTC_I2C_AND_SPI_MODULE 1
+#define CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK 1
+#define CONFIG_LEGACY_DIRECT_IO 1
+#define CONFIG_THERMAL 1
+#define CONFIG_SYNC_FILE 1
+#define CONFIG_USB_XHCI_PCI 1
+#define CONFIG_IP_PNP_RARP 1
+#define CONFIG_DEBUG_TIMEKEEPING 1
+#define CONFIG_VETH_MODULE 1
+#define CONFIG_NET_VENDOR_3COM 1
+#define CONFIG_STACKPROTECTOR 1
+#define CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK 1
+#define CONFIG_MMC_SDHCI_CADENCE 1
+#define CONFIG_HAVE_ARCH_KGDB 1
+#define CONFIG_BLK_DEBUG_FS 1
+#define CONFIG_NET_VENDOR_INTEL 1
+#define CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK 1
+#define CONFIG_RPS 1
+#define CONFIG_SERIAL_8250_EXAR 1
+#define CONFIG_PROC_PID_CPUSET 1
+#define CONFIG_PM_GENERIC_DOMAINS 1
+#define CONFIG_LEGACY_PTY_COUNT 256
+#define CONFIG_GENERIC_CSUM 1
+#define CONFIG_MTD_MAP_BANK_WIDTH_2 1
+#define CONFIG_GENERIC_IDLE_POLL_SETUP 1
+#define CONFIG_RESET_SIMPLE 1
+#define CONFIG_MDIO_BUS_MUX_MODULE 1
+#define CONFIG_ZISOFS 1
+#define CONFIG_WLAN_VENDOR_MEDIATEK 1
+#define CONFIG_IP_MULTICAST 1
+#define CONFIG_NET_VENDOR_CISCO 1
+#define CONFIG_GENERIC_IRQ_IPI_MUX 1
+#define CONFIG_TICK_ONESHOT 1
+#define CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL 1
+#define CONFIG_CRYPTO_CTR_MODULE 1
+#define CONFIG_XARRAY_MULTI 1
+#define CONFIG_LOCK_MM_AND_FIND_VMA 1
+#define CONFIG_SUNXI_WATCHDOG 1
+#define CONFIG_HW_RANDOM 1
+#define CONFIG_MUTEX_SPIN_ON_OWNER 1
+#define CONFIG_DEBUG_VM_IRQSOFF 1
+#define CONFIG_DYNAMIC_SIGFRAME 1
+#define CONFIG_CGROUP_NET_CLASSID 1
+#define CONFIG_RISCV_SBI_CPUIDLE 1
+#define CONFIG_HAVE_FUNCTION_GRAPH_TRACER 1
+#define CONFIG_BUFFER_HEAD 1
+#define CONFIG_OF_MDIO 1
+#define CONFIG_CRYPTO_BLAKE2B_MODULE 1
+#define CONFIG_TREE_SRCU 1
+#define CONFIG_CRYPTO_NULL2_MODULE 1
+#define CONFIG_ARCH_HAS_MMIOWB 1
+#define CONFIG_ACPI_MDIO 1
+#define CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP 1
+#define CONFIG_SERIAL_8250_DMA 1
+#define CONFIG_BASE_SMALL 0
+#define CONFIG_SECURITY_SELINUX_AVC_STATS 1
+#define CONFIG_VIDEO_CMDLINE 1
+#define CONFIG_COMPACTION 1
+#define CONFIG_NFS_V2 1
+#define CONFIG_BLK_CGROUP_PUNT_BIO 1
+#define CONFIG_PROC_FS 1
+#define CONFIG_MTD_BLOCK 1
+#define CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE 1
+#define CONFIG_GENERIC_BUG_RELATIVE_POINTERS 1
+#define CONFIG_VIRTIO_MMIO 1
+#define CONFIG_NET_VENDOR_ROCKER 1
+#define CONFIG_SCSI_LOWLEVEL 1
+#define CONFIG_RISCV_PMU_SBI 1
+#define CONFIG_MEMFD_CREATE 1
+#define CONFIG_IRQ_FORCED_THREADING 1
+#define CONFIG_DRM_FBDEV_EMULATION 1
+#define CONFIG_SND 1
+#define CONFIG_CLK_STARFIVE_JH7110_VOUT_MODULE 1
+#define CONFIG_USB_F_OBEX_MODULE 1
+#define CONFIG_PHY_SUN6I_MIPI_DPHY_MODULE 1
+#define CONFIG_PCIE_DW 1
+#define CONFIG_LD_ORPHAN_WARN 1
+#define CONFIG_NET_VENDOR_NATSEMI 1
+#define CONFIG_USB_MUSB_DUAL_ROLE 1
+#define CONFIG_VIRTIO_PCI_LIB_LEGACY 1
+#define CONFIG_USB_F_MASS_STORAGE_MODULE 1
+#define CONFIG_IKCONFIG 1
+#define CONFIG_NET_VENDOR_GOOGLE 1
+#define CONFIG_DEBUG_PLIST 1
+#define CONFIG_GENERIC_IRQ_MIGRATION 1
+#define CONFIG_NET_VENDOR_NETRONOME 1
+#define CONFIG_DEBUG_LIST 1
+#define CONFIG_NFS_USE_KERNEL_DNS 1
+#define CONFIG_ARCH_HAS_FORTIFY_SOURCE 1
+#define CONFIG_GCC_VERSION 130200
+#define CONFIG_CRYPTO_LIB_POLY1305_RSIZE 1
+#define CONFIG_SYSCTL 1
+#define CONFIG_CC_CAN_LINK_STATIC 1
+#define CONFIG_ARCH_HAS_GCOV_PROFILE_ALL 1
+#define CONFIG_BRIDGE_IGMP_SNOOPING 1
+#define CONFIG_PHYS_ADDR_T_64BIT 1
+#define CONFIG_THREAD_INFO_IN_TASK 1
+#define CONFIG_NET_VENDOR_LITEX 1
+#define CONFIG_GENERIC_MSI_IRQ 1
+#define CONFIG_HAVE_ARCH_TRACEHOOK 1
+#define CONFIG_RPMSG_CHAR 1
+#define CONFIG_ARCH_STARFIVE 1
+#define CONFIG_PCI_DOMAINS_GENERIC 1
+#define CONFIG_DRM_FBDEV_OVERALLOC 100
+#define CONFIG_XFRM_USER_MODULE 1
+#define CONFIG_CPUFREQ_DT_PLATDEV 1
+#define CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW 1
+#define CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 1
+#define CONFIG_NET_NS 1
+#define CONFIG_HAVE_PERF_EVENTS 1
+#define CONFIG_BTT 1
+#define CONFIG_ATA_SFF 1
+#define CONFIG_NET_VENDOR_SOLARFLARE 1
+#define CONFIG_CAN_GW_MODULE 1
+#define CONFIG_STMMAC_ETH_MODULE 1
+#define CONFIG_BLK_DEV_PMEM 1
+#define CONFIG_DEBUG_MEMORY_INIT 1
+#define CONFIG_XFRM_ESP_MODULE 1
+#define CONFIG_AUDIT 1
+#define CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE 1
+#define CONFIG_HAVE_RETHOOK 1
+#define CONFIG_NET_9P_FD 1
+#define CONFIG_LTO_NONE 1
+#define CONFIG_PCIEASPM 1
+#define CONFIG_DEBUG_FS_ALLOW_ALL 1
+#define CONFIG_FB_DEFERRED_IO 1
+#define CONFIG_SATA_AHCI 1
+#define CONFIG_SECURITY 1
+#define CONFIG_MAX_SKB_FRAGS 17
+#define CONFIG_PORTABLE 1
+#define CONFIG_SND_TIMER 1
+#define CONFIG_KVM_MMIO 1
+#define CONFIG_CLK_SIFIVE 1
+#define CONFIG_USB_EHCI_TT_NEWSCHED 1
+#define CONFIG_FAT_DEFAULT_CODEPAGE 437
+#define CONFIG_BLK_DEV 1
+#define CONFIG_BRIDGE_NETFILTER_MODULE 1
+#define CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI 1
+#define CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT 1
+#define CONFIG_OF_FLATTREE 1
+#define CONFIG_HAVE_ARCH_KFENCE 1
+#define CONFIG_WLAN_VENDOR_SILABS 1
+#define CONFIG_IOMMU_API 1
+#define CONFIG_RISCV_ISA_FALLBACK 1
+#define CONFIG_GPIO_GENERIC 1
+#define CONFIG_TRACING_SUPPORT 1
+#define CONFIG_UNIX98_PTYS 1
+#define CONFIG_DEBUG_VM_PGFLAGS 1
+#define CONFIG_NET_RX_BUSY_POLL 1
+#define CONFIG_NET_VENDOR_SOCIONEXT 1
+#define CONFIG_SECURITY_SELINUX 1
+#define CONFIG_ZONE_DMA32 1
+#define CONFIG_NET_SCHED 1
+#define CONFIG_ARCH_SUPPORTS_KEXEC 1
+#define CONFIG_DRM_PANEL 1
+#define CONFIG_PRINTK_TIME 1
+#define CONFIG_ARCH_VIRT 1
+#define CONFIG_INPUT_MOUSEDEV_SCREEN_Y 768
+#define CONFIG_TASKS_RCU_GENERIC 1
+#define CONFIG_SECCOMP_FILTER 1
+#define CONFIG_IRQCHIP 1
+#define CONFIG_INET_DIAG 1
+#define CONFIG_CRYPTO_GHASH_MODULE 1
+#define CONFIG_GENERIC_ENTRY 1
+#define CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW 1
+#define CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS 9
+#define CONFIG_NF_NAT_FTP_MODULE 1
+#define CONFIG_NF_CT_PROTO_UDPLITE 1
+#define CONFIG_IKCONFIG_PROC 1
+#define CONFIG_ELF_CORE 1
+#define CONFIG_PCI_HOST_COMMON 1
+#define CONFIG_HAVE_CONTEXT_TRACKING_USER 1
+#define CONFIG_MODULE_SECTIONS 1
+#define CONFIG_USB_SUPPORT 1
+#define CONFIG_HAVE_ARCH_KGDB_QXFER_PKT 1
+#define CONFIG_WLAN_VENDOR_ST 1
+#define CONFIG_PCP_BATCH_SCALE_MAX 5
+#define CONFIG_BLK_DEV_NVME_MODULE 1
+#define CONFIG_SOC_BUS 1
+#define CONFIG_NET_VENDOR_SIS 1
+#define CONFIG_HAVE_64BIT_ALIGNED_ACCESS 1
+#define CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE 1
+#define CONFIG_PAGE_POOL 1
+#define CONFIG_SERIAL_8250_16550A_VARIANTS 1
+#define CONFIG_INIT_STACK_ALL_ZERO 1
+#define CONFIG_VT_CONSOLE 1
+#define CONFIG_HW_RANDOM_VIRTIO 1
+#define CONFIG_AS_HAS_INSN 1
+#define CONFIG_MQ_IOSCHED_KYBER 1
+#define CONFIG_AS_VERSION 24200
+#define CONFIG_CC_HAS_INT128 1
+#define CONFIG_EFI_EARLYCON 1
+#define CONFIG_WLAN_VENDOR_MARVELL 1
+#define CONFIG_NOP_USB_XCEIV_MODULE 1
+#define CONFIG_NET_SOCK_MSG 1
+#define CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE 1
+#define CONFIG_SERIAL_8250_DWLIB 1
+#define CONFIG_USB_F_NCM_MODULE 1
+#define CONFIG_ARCH_MMAP_RND_COMPAT_BITS 8
+#define CONFIG_DRM_MODULE 1
+#define CONFIG_PINCTRL_STARFIVE_JH7110 1
+#define CONFIG_POSIX_MQUEUE_SYSCTL 1
+#define CONFIG_VHOST_MENU 1
+#define CONFIG_DRM_EXEC_MODULE 1
+#define CONFIG_DEBUG_MISC 1
+#define CONFIG_FB_CFB_COPYAREA 1
+#define CONFIG_USB_F_ECM_MODULE 1
+#define CONFIG_HAVE_KVM_IRQCHIP 1
+#define CONFIG_NET_VENDOR_MICROSEMI 1
+#define CONFIG_BALLOON_COMPACTION 1
+#define CONFIG_ARCH_OPTIONAL_KERNEL_RWX 1
+#define CONFIG_ARCH_HAS_TICK_BROADCAST 1
+#define CONFIG_BINARY_PRINTF 1
+#define CONFIG_ZSTD_DECOMPRESS 1
+#define CONFIG_WLAN_VENDOR_QUANTENNA 1
+#define CONFIG_SND_PCM_TIMER 1
+#define CONFIG_ARCH_HAS_SET_DIRECT_MAP 1
+#define CONFIG_SYSVIPC_SYSCTL 1
+#define CONFIG_WLAN_VENDOR_TI 1
+#define CONFIG_DMA_SHARED_BUFFER 1
+#define CONFIG_RTC_SYSTOHC 1
+#define CONFIG_OF_ADDRESS 1
+#define CONFIG_DECOMPRESS_GZIP 1
+#define CONFIG_VIRTIO_MENU 1
+#define CONFIG_VIRTIO_BLK 1
+#define CONFIG_DECOMPRESS_LZO 1
+#define CONFIG_CRYPTO_XXHASH_MODULE 1
+#define CONFIG_64BIT 1
+#define CONFIG_MMC_TMIO_CORE 1
+#define CONFIG_MMC_SDHCI_PLTFM 1
+#define CONFIG_I2C_CHARDEV_MODULE 1
+#define CONFIG_LLC_MODULE 1
+#define CONFIG_ARCH_USE_QUEUED_RWLOCKS 1
+#define CONFIG_ARCH_KEEP_MEMBLOCK 1
+#define CONFIG_REGMAP_MMIO 1
+#define CONFIG_NETWORK_SECMARK 1
+#define CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 1
+#define CONFIG_POWER_SUPPLY_HWMON 1
+#define CONFIG_SERIAL_8250_PCI 1
+#define CONFIG_MOUSE_PS2_SYNAPTICS 1
+#define CONFIG_ATA_BMDMA 1
+#define CONFIG_XZ_DEC_ARMTHUMB 1
+#define CONFIG_NFS_V4_1 1
+#define CONFIG_ARCH_WANT_FRAME_POINTERS 1
+#define CONFIG_REGMAP 1
+#define CONFIG_FB_IOMEM_HELPERS 1
+#define CONFIG_PCIE_PME 1
+#define CONFIG_HAVE_MOD_ARCH_SPECIFIC 1
+#define CONFIG_ERRATA_SIFIVE 1
+#define CONFIG_FB_NOTIFY 1
+#define CONFIG_CAN_DEV_MODULE 1
+#define CONFIG_STRICT_MODULE_RWX 1
+#define CONFIG_ERRATA_SIFIVE_CIP_1200 1
+#define CONFIG_SYSCTL_EXCEPTION_TRACE 1
+#define CONFIG_SYSVIPC_COMPAT 1
+#define CONFIG_FHANDLE 1
+#define CONFIG_WATCHDOG_OPEN_TIMEOUT 0
+#define CONFIG_CRYPTO_LIB_SHA256_MODULE 1
+#define CONFIG_SWAP 1
+#define CONFIG_FW_CACHE 1
+#define CONFIG_RESET_POLARFIRE_SOC 1
+#define CONFIG_STACKPROTECTOR_PER_TASK 1
+#define CONFIG_CRC_CCITT_MODULE 1
+#define CONFIG_IPVLAN_L3S 1
+#define CONFIG_NET_VENDOR_CAVIUM 1
+#define CONFIG_GPIOLIB_IRQCHIP 1
+#define CONFIG_BPF_UNPRIV_DEFAULT_OFF 1
+#define CONFIG_BLK_DEV_SD 1
+#define CONFIG_MODULE_UNLOAD 1
+#define CONFIG_PREEMPT_COUNT 1
+#define CONFIG_NET_VENDOR_ENGLEDER 1
+#define CONFIG_RWSEM_SPIN_ON_OWNER 1
+#define CONFIG_CLK_STARFIVE_JH7110_AON_MODULE 1
+#define CONFIG_GENERIC_PINMUX_FUNCTIONS 1
+#define CONFIG_CC_HAS_ASM_GOTO_OUTPUT 1
+#define CONFIG_BITREVERSE 1
+#define CONFIG_DEVPORT 1
+#define CONFIG_IOSCHED_BFQ 1
+#define CONFIG_PNP_DEBUG_MESSAGES 1
+#define CONFIG_NF_CONNTRACK_MODULE 1
+#define CONFIG_EFI_RUNTIME_WRAPPERS 1
+#define CONFIG_MDIO_DEVRES 1
+#define CONFIG_LSM "landlock,lockdown,yama,loadpin,safesetid,bpf"
+#define CONFIG_ARCH_DMA_ADDR_T_64BIT 1
+#define CONFIG_FILE_LOCKING 1
+#define CONFIG_SND_SOC_I2C_AND_SPI_MODULE 1
+#define CONFIG_CAN_RCAR_CANFD_MODULE 1
+#define CONFIG_AIO 1
+#define CONFIG_OF 1
+#define CONFIG_PERF_EVENTS 1
+#define CONFIG_GENERIC_TIME_VSYSCALL 1
+#define CONFIG_IP_NF_TARGET_REJECT_MODULE 1
+#define CONFIG_HAVE_MOVE_PMD 1
+#define CONFIG_KALLSYMS_BASE_RELATIVE 1
+#define CONFIG_IP_VS_TAB_BITS 12
+#define CONFIG_RTC_INTF_DEV 1
+#define CONFIG_SPI_SIFIVE 1
+#define CONFIG_MTD_MAP_BANK_WIDTH_4 1
+#define CONFIG_HID_SUPPORT 1
+#define CONFIG_DEBUG_PAGEALLOC 1
+#define CONFIG_MESSAGE_LOGLEVEL_DEFAULT 4
+#define CONFIG_LOCKUP_DETECTOR 1
+#define CONFIG_IP_VS_PROTO_UDP 1
+#define CONFIG_NLS_DEFAULT "iso8859-1"
+#define CONFIG_UTS_NS 1
+#define CONFIG_VIDEO_NOMODESET 1
+#define CONFIG_PAGE_REPORTING 1
+#define CONFIG_DMA_ENGINE 1
+#define CONFIG_CGROUP_PIDS 1
+#define CONFIG_CRYPTO_AEAD2 1
+#define CONFIG_MOUSE_PS2 1
+#define CONFIG_IP_VS_SH_TAB_BITS 8
+#define CONFIG_CRYPTO_ALGAPI2 1
+#define CONFIG_INPUT 1
+#define CONFIG_PROC_SYSCTL 1
+#define CONFIG_FWNODE_MDIO 1
+#define CONFIG_RD_LZ4 1
+#define CONFIG_MMU 1
+#define CONFIG_LD_VERSION 24200
+#define CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY 1
diff --git a/riscv/include/generated/bounds.h b/riscv/include/generated/bounds.h
new file mode 100644
index 0000000..e6bac58
--- /dev/null
+++ b/riscv/include/generated/bounds.h
@@ -0,0 +1,16 @@
+#ifndef __LINUX_BOUNDS_H__
+#define __LINUX_BOUNDS_H__
+/*
+ * DO NOT MODIFY.
+ *
+ * This file was generated by Kbuild
+ */
+
+#define NR_PAGEFLAGS 22 /* __NR_PAGEFLAGS */
+#define MAX_NR_ZONES 3 /* __MAX_NR_ZONES */
+#define NR_CPUS_BITS 6 /* ilog2(CONFIG_NR_CPUS) */
+#define SPINLOCK_SIZE 24 /* sizeof(spinlock_t) */
+#define LRU_GEN_WIDTH 0 /* 0 */
+#define __LRU_REFS_WIDTH 0 /* 0 */
+
+#endif
diff --git a/riscv/include/generated/compat_vdso-offsets.h b/riscv/include/generated/compat_vdso-offsets.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/riscv/include/generated/compat_vdso-offsets.h
diff --git a/riscv/include/generated/compile.h b/riscv/include/generated/compile.h
new file mode 100644
index 0000000..7bf3487
--- /dev/null
+++ b/riscv/include/generated/compile.h
@@ -0,0 +1,4 @@
+#define UTS_MACHINE "riscv64"
+#define LINUX_COMPILE_BY "dynamic"
+#define LINUX_COMPILE_HOST "ThinkPadP1"
+#define LINUX_COMPILER "riscv64-unknown-elf-gcc () 13.2.0, GNU ld (GNU Binutils) 2.42"
diff --git a/riscv/include/generated/rustc_cfg b/riscv/include/generated/rustc_cfg
new file mode 100644
index 0000000..c6aab3b
--- /dev/null
+++ b/riscv/include/generated/rustc_cfg
@@ -0,0 +1,3013 @@
+--cfg=CONFIG_HAVE_ARCH_SECCOMP_FILTER
+--cfg=CONFIG_HAVE_ARCH_SECCOMP_FILTER="y"
+--cfg=CONFIG_SND_PROC_FS
+--cfg=CONFIG_SND_PROC_FS="y"
+--cfg=CONFIG_SCSI_DMA
+--cfg=CONFIG_SCSI_DMA="y"
+--cfg=CONFIG_NETFILTER_FAMILY_BRIDGE
+--cfg=CONFIG_NETFILTER_FAMILY_BRIDGE="y"
+--cfg=CONFIG_CC_HAS_SANCOV_TRACE_PC
+--cfg=CONFIG_CC_HAS_SANCOV_TRACE_PC="y"
+--cfg=CONFIG_DEFAULT_INIT=""
+--cfg=CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+--cfg=CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT="y"
+--cfg=CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE
+--cfg=CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE="y"
+--cfg=CONFIG_INPUT_KEYBOARD
+--cfg=CONFIG_INPUT_KEYBOARD="y"
+--cfg=CONFIG_INET_TABLE_PERTURB_ORDER="16"
+--cfg=CONFIG_ARCH_SUPPORTS_INT128
+--cfg=CONFIG_ARCH_SUPPORTS_INT128="y"
+--cfg=CONFIG_SLUB_CPU_PARTIAL
+--cfg=CONFIG_SLUB_CPU_PARTIAL="y"
+--cfg=CONFIG_RFS_ACCEL
+--cfg=CONFIG_RFS_ACCEL="y"
+--cfg=CONFIG_IP_NF_TARGET_REDIRECT
+--cfg=CONFIG_IP_NF_TARGET_REDIRECT="m"
+--cfg=CONFIG_ARCH_WANTS_THP_SWAP
+--cfg=CONFIG_ARCH_WANTS_THP_SWAP="y"
+--cfg=CONFIG_CRC32
+--cfg=CONFIG_CRC32="y"
+--cfg=CONFIG_I2C_BOARDINFO
+--cfg=CONFIG_I2C_BOARDINFO="y"
+--cfg=CONFIG_RESET_STARFIVE_JH7110
+--cfg=CONFIG_RESET_STARFIVE_JH7110="y"
+--cfg=CONFIG_MEMREGION
+--cfg=CONFIG_MEMREGION="y"
+--cfg=CONFIG_PNFS_FLEXFILE_LAYOUT
+--cfg=CONFIG_PNFS_FLEXFILE_LAYOUT="y"
+--cfg=CONFIG_USB_CONFIGFS_NCM
+--cfg=CONFIG_USB_CONFIGFS_NCM="y"
+--cfg=CONFIG_DRM_NOUVEAU
+--cfg=CONFIG_DRM_NOUVEAU="m"
+--cfg=CONFIG_CLK_STARFIVE_JH7110_PLL
+--cfg=CONFIG_CLK_STARFIVE_JH7110_PLL="y"
+--cfg=CONFIG_PCI_ECAM
+--cfg=CONFIG_PCI_ECAM="y"
+--cfg=CONFIG_SECCOMP
+--cfg=CONFIG_SECCOMP="y"
+--cfg=CONFIG_CPU_FREQ_GOV_CONSERVATIVE
+--cfg=CONFIG_CPU_FREQ_GOV_CONSERVATIVE="m"
+--cfg=CONFIG_HIGH_RES_TIMERS
+--cfg=CONFIG_HIGH_RES_TIMERS="y"
+--cfg=CONFIG_SERIAL_SH_SCI_EARLYCON
+--cfg=CONFIG_SERIAL_SH_SCI_EARLYCON="y"
+--cfg=CONFIG_DT_IDLE_GENPD
+--cfg=CONFIG_DT_IDLE_GENPD="y"
+--cfg=CONFIG_ARCH_HAS_SET_MEMORY
+--cfg=CONFIG_ARCH_HAS_SET_MEMORY="y"
+--cfg=CONFIG_CC_HAVE_STACKPROTECTOR_TLS
+--cfg=CONFIG_CC_HAVE_STACKPROTECTOR_TLS="y"
+--cfg=CONFIG_BLK_DEV_DM
+--cfg=CONFIG_BLK_DEV_DM="m"
+--cfg=CONFIG_VLAN_8021Q
+--cfg=CONFIG_VLAN_8021Q="m"
+--cfg=CONFIG_GCC11_NO_ARRAY_BOUNDS
+--cfg=CONFIG_GCC11_NO_ARRAY_BOUNDS="y"
+--cfg=CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
+--cfg=CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE="y"
+--cfg=CONFIG_FIX_EARLYCON_MEM
+--cfg=CONFIG_FIX_EARLYCON_MEM="y"
+--cfg=CONFIG_ARCH_DMA_DEFAULT_COHERENT
+--cfg=CONFIG_ARCH_DMA_DEFAULT_COHERENT="y"
+--cfg=CONFIG_INOTIFY_USER
+--cfg=CONFIG_INOTIFY_USER="y"
+--cfg=CONFIG_HDMI
+--cfg=CONFIG_HDMI="y"
+--cfg=CONFIG_NETWORK_FILESYSTEMS
+--cfg=CONFIG_NETWORK_FILESYSTEMS="y"
+--cfg=CONFIG_SATA_AHCI_PLATFORM
+--cfg=CONFIG_SATA_AHCI_PLATFORM="y"
+--cfg=CONFIG_CPU_FREQ_GOV_ONDEMAND
+--cfg=CONFIG_CPU_FREQ_GOV_ONDEMAND="y"
+--cfg=CONFIG_ERRATA_THEAD
+--cfg=CONFIG_ERRATA_THEAD="y"
+--cfg=CONFIG_FB_CORE
+--cfg=CONFIG_FB_CORE="y"
+--cfg=CONFIG_GLOB
+--cfg=CONFIG_GLOB="y"
+--cfg=CONFIG_ARCH_WANT_LD_ORPHAN_WARN
+--cfg=CONFIG_ARCH_WANT_LD_ORPHAN_WARN="y"
+--cfg=CONFIG_CGROUP_DEVICE
+--cfg=CONFIG_CGROUP_DEVICE="y"
+--cfg=CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC
+--cfg=CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC="y"
+--cfg=CONFIG_ARCH_SUSPEND_POSSIBLE
+--cfg=CONFIG_ARCH_SUSPEND_POSSIBLE="y"
+--cfg=CONFIG_MMU_LAZY_TLB_REFCOUNT
+--cfg=CONFIG_MMU_LAZY_TLB_REFCOUNT="y"
+--cfg=CONFIG_MAC80211_STA_HASH_MAX_SIZE="0"
+--cfg=CONFIG_HAVE_ARCH_MMAP_RND_BITS
+--cfg=CONFIG_HAVE_ARCH_MMAP_RND_BITS="y"
+--cfg=CONFIG_PNPACPI
+--cfg=CONFIG_PNPACPI="y"
+--cfg=CONFIG_CPU_FREQ_GOV_ATTR_SET
+--cfg=CONFIG_CPU_FREQ_GOV_ATTR_SET="y"
+--cfg=CONFIG_EXT4_FS_POSIX_ACL
+--cfg=CONFIG_EXT4_FS_POSIX_ACL="y"
+--cfg=CONFIG_PHYLINK
+--cfg=CONFIG_PHYLINK="y"
+--cfg=CONFIG_ZSTD_COMPRESS
+--cfg=CONFIG_ZSTD_COMPRESS="y"
+--cfg=CONFIG_SSB_POSSIBLE
+--cfg=CONFIG_SSB_POSSIBLE="y"
+--cfg=CONFIG_NFS_V4_2
+--cfg=CONFIG_NFS_V4_2="y"
+--cfg=CONFIG_USB_F_EEM
+--cfg=CONFIG_USB_F_EEM="m"
+--cfg=CONFIG_MMU_NOTIFIER
+--cfg=CONFIG_MMU_NOTIFIER="y"
+--cfg=CONFIG_DRM_RADEON
+--cfg=CONFIG_DRM_RADEON="m"
+--cfg=CONFIG_SPI_DYNAMIC
+--cfg=CONFIG_SPI_DYNAMIC="y"
+--cfg=CONFIG_IP_NF_NAT
+--cfg=CONFIG_IP_NF_NAT="m"
+--cfg=CONFIG_USB_XHCI_RCAR
+--cfg=CONFIG_USB_XHCI_RCAR="y"
+--cfg=CONFIG_USB_OHCI_LITTLE_ENDIAN
+--cfg=CONFIG_USB_OHCI_LITTLE_ENDIAN="y"
+--cfg=CONFIG_NET_SCH_FIFO
+--cfg=CONFIG_NET_SCH_FIFO="y"
+--cfg=CONFIG_SWPHY
+--cfg=CONFIG_SWPHY="y"
+--cfg=CONFIG_FSNOTIFY
+--cfg=CONFIG_FSNOTIFY="y"
+--cfg=CONFIG_BLK_DEV_LOOP_MIN_COUNT="8"
+--cfg=CONFIG_STP
+--cfg=CONFIG_STP="m"
+--cfg=CONFIG_ARCH_FLATMEM_ENABLE
+--cfg=CONFIG_ARCH_FLATMEM_ENABLE="y"
+--cfg=CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+--cfg=CONFIG_CRYPTO_MANAGER_DISABLE_TESTS="y"
+--cfg=CONFIG_GENERIC_SMP_IDLE_THREAD
+--cfg=CONFIG_GENERIC_SMP_IDLE_THREAD="y"
+--cfg=CONFIG_NET_VENDOR_QUALCOMM
+--cfg=CONFIG_NET_VENDOR_QUALCOMM="y"
+--cfg=CONFIG_RZG2L_THERMAL
+--cfg=CONFIG_RZG2L_THERMAL="y"
+--cfg=CONFIG_RTC_DRV_SUN6I
+--cfg=CONFIG_RTC_DRV_SUN6I="y"
+--cfg=CONFIG_ARCH_SUPPORTS_CRASH_DUMP
+--cfg=CONFIG_ARCH_SUPPORTS_CRASH_DUMP="y"
+--cfg=CONFIG_NET_VENDOR_EZCHIP
+--cfg=CONFIG_NET_VENDOR_EZCHIP="y"
+--cfg=CONFIG_DEFAULT_SECURITY_DAC
+--cfg=CONFIG_DEFAULT_SECURITY_DAC="y"
+--cfg=CONFIG_LDISC_AUTOLOAD
+--cfg=CONFIG_LDISC_AUTOLOAD="y"
+--cfg=CONFIG_USB_CONFIGFS_OBEX
+--cfg=CONFIG_USB_CONFIGFS_OBEX="y"
+--cfg=CONFIG_IP_VS_NFCT
+--cfg=CONFIG_IP_VS_NFCT="y"
+--cfg=CONFIG_RT_GROUP_SCHED
+--cfg=CONFIG_RT_GROUP_SCHED="y"
+--cfg=CONFIG_USB_AUTOSUSPEND_DELAY="2"
+--cfg=CONFIG_IP6_NF_MANGLE
+--cfg=CONFIG_IP6_NF_MANGLE="m"
+--cfg=CONFIG_HAVE_IRQ_TIME_ACCOUNTING
+--cfg=CONFIG_HAVE_IRQ_TIME_ACCOUNTING="y"
+--cfg=CONFIG_IP_VS_RR
+--cfg=CONFIG_IP_VS_RR="m"
+--cfg=CONFIG_IPV6
+--cfg=CONFIG_IPV6="y"
+--cfg=CONFIG_HAVE_STACKPROTECTOR
+--cfg=CONFIG_HAVE_STACKPROTECTOR="y"
+--cfg=CONFIG_NET_9P
+--cfg=CONFIG_NET_9P="y"
+--cfg=CONFIG_NET_HANDSHAKE
+--cfg=CONFIG_NET_HANDSHAKE="y"
+--cfg=CONFIG_CRYPTO_AEAD
+--cfg=CONFIG_CRYPTO_AEAD="y"
+--cfg=CONFIG_COMPAT
+--cfg=CONFIG_COMPAT="y"
+--cfg=CONFIG_BQL
+--cfg=CONFIG_BQL="y"
+--cfg=CONFIG_DEFAULT_TCP_CONG="cubic"
+--cfg=CONFIG_USB_UAS
+--cfg=CONFIG_USB_UAS="y"
+--cfg=CONFIG_DEVTMPFS
+--cfg=CONFIG_DEVTMPFS="y"
+--cfg=CONFIG_OF_IOMMU
+--cfg=CONFIG_OF_IOMMU="y"
+--cfg=CONFIG_PNFS_FILE_LAYOUT
+--cfg=CONFIG_PNFS_FILE_LAYOUT="y"
+--cfg=CONFIG_SUNRPC_BACKCHANNEL
+--cfg=CONFIG_SUNRPC_BACKCHANNEL="y"
+--cfg=CONFIG_IP6_NF_TARGET_REJECT
+--cfg=CONFIG_IP6_NF_TARGET_REJECT="m"
+--cfg=CONFIG_CPUFREQ_DT
+--cfg=CONFIG_CPUFREQ_DT="y"
+--cfg=CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX="17"
+--cfg=CONFIG_ARCH_SPARSEMEM_ENABLE
+--cfg=CONFIG_ARCH_SPARSEMEM_ENABLE="y"
+--cfg=CONFIG_RAVB
+--cfg=CONFIG_RAVB="y"
+--cfg=CONFIG_NF_NAT_REDIRECT
+--cfg=CONFIG_NF_NAT_REDIRECT="y"
+--cfg=CONFIG_HOTPLUG_CPU
+--cfg=CONFIG_HOTPLUG_CPU="y"
+--cfg=CONFIG_WLAN
+--cfg=CONFIG_WLAN="y"
+--cfg=CONFIG_NAMESPACES
+--cfg=CONFIG_NAMESPACES="y"
+--cfg=CONFIG_ARCH_USE_MEMREMAP_PROT
+--cfg=CONFIG_ARCH_USE_MEMREMAP_PROT="y"
+--cfg=CONFIG_NFS_V4_2_READ_PLUS
+--cfg=CONFIG_NFS_V4_2_READ_PLUS="y"
+--cfg=CONFIG_HAVE_ARCH_HUGE_VMAP
+--cfg=CONFIG_HAVE_ARCH_HUGE_VMAP="y"
+--cfg=CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
+--cfg=CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY="y"
+--cfg=CONFIG_DRM_GEM_DMA_HELPER
+--cfg=CONFIG_DRM_GEM_DMA_HELPER="m"
+--cfg=CONFIG_OF_PMEM
+--cfg=CONFIG_OF_PMEM="y"
+--cfg=CONFIG_USB_CONFIGFS
+--cfg=CONFIG_USB_CONFIGFS="m"
+--cfg=CONFIG_RISCV_ISA_V
+--cfg=CONFIG_RISCV_ISA_V="y"
+--cfg=CONFIG_BLK_DEV_BSG
+--cfg=CONFIG_BLK_DEV_BSG="y"
+--cfg=CONFIG_RISCV_ISA_ZICBOM
+--cfg=CONFIG_RISCV_ISA_ZICBOM="y"
+--cfg=CONFIG_INTEGRITY
+--cfg=CONFIG_INTEGRITY="y"
+--cfg=CONFIG_DEBUG_RT_MUTEXES
+--cfg=CONFIG_DEBUG_RT_MUTEXES="y"
+--cfg=CONFIG_LEGACY_PTYS
+--cfg=CONFIG_LEGACY_PTYS="y"
+--cfg=CONFIG_CRYPTO_DRBG_MENU
+--cfg=CONFIG_CRYPTO_DRBG_MENU="m"
+--cfg=CONFIG_CRYPTO_RNG2
+--cfg=CONFIG_CRYPTO_RNG2="y"
+--cfg=CONFIG_MSDOS_FS
+--cfg=CONFIG_MSDOS_FS="y"
+--cfg=CONFIG_USB_U_SERIAL
+--cfg=CONFIG_USB_U_SERIAL="m"
+--cfg=CONFIG_NET_CLS_CGROUP
+--cfg=CONFIG_NET_CLS_CGROUP="m"
+--cfg=CONFIG_WLAN_VENDOR_MICROCHIP
+--cfg=CONFIG_WLAN_VENDOR_MICROCHIP="y"
+--cfg=CONFIG_NET_VENDOR_DAVICOM
+--cfg=CONFIG_NET_VENDOR_DAVICOM="y"
+--cfg=CONFIG_SOFTIRQ_ON_OWN_STACK
+--cfg=CONFIG_SOFTIRQ_ON_OWN_STACK="y"
+--cfg=CONFIG_CAN
+--cfg=CONFIG_CAN="m"
+--cfg=CONFIG_PAGE_SIZE_LESS_THAN_256KB
+--cfg=CONFIG_PAGE_SIZE_LESS_THAN_256KB="y"
+--cfg=CONFIG_GENERIC_PINCTRL_GROUPS
+--cfg=CONFIG_GENERIC_PINCTRL_GROUPS="y"
+--cfg=CONFIG_OF_RESERVED_MEM
+--cfg=CONFIG_OF_RESERVED_MEM="y"
+--cfg=CONFIG_CLK_STARFIVE_JH7110_STG
+--cfg=CONFIG_CLK_STARFIVE_JH7110_STG="m"
+--cfg=CONFIG_SERIAL_8250
+--cfg=CONFIG_SERIAL_8250="y"
+--cfg=CONFIG_LZO_DECOMPRESS
+--cfg=CONFIG_LZO_DECOMPRESS="y"
+--cfg=CONFIG_IOMMU_SUPPORT
+--cfg=CONFIG_IOMMU_SUPPORT="y"
+--cfg=CONFIG_STMMAC_PLATFORM
+--cfg=CONFIG_STMMAC_PLATFORM="m"
+--cfg=CONFIG_SUN4I_TIMER
+--cfg=CONFIG_SUN4I_TIMER="y"
+--cfg=CONFIG_WLAN_VENDOR_CISCO
+--cfg=CONFIG_WLAN_VENDOR_CISCO="y"
+--cfg=CONFIG_COMPAT_BINFMT_ELF
+--cfg=CONFIG_COMPAT_BINFMT_ELF="y"
+--cfg=CONFIG_RD_LZMA
+--cfg=CONFIG_RD_LZMA="y"
+--cfg=CONFIG_USB
+--cfg=CONFIG_USB="y"
+--cfg=CONFIG_MODULES_USE_ELF_RELA
+--cfg=CONFIG_MODULES_USE_ELF_RELA="y"
+--cfg=CONFIG_CRYPTO_HMAC
+--cfg=CONFIG_CRYPTO_HMAC="m"
+--cfg=CONFIG_WLAN_VENDOR_REALTEK
+--cfg=CONFIG_WLAN_VENDOR_REALTEK="y"
+--cfg=CONFIG_ETHERNET
+--cfg=CONFIG_ETHERNET="y"
+--cfg=CONFIG_CRYPTO_SHA3
+--cfg=CONFIG_CRYPTO_SHA3="m"
+--cfg=CONFIG_CRC_ITU_T
+--cfg=CONFIG_CRC_ITU_T="y"
+--cfg=CONFIG_HAVE_DMA_CONTIGUOUS
+--cfg=CONFIG_HAVE_DMA_CONTIGUOUS="y"
+--cfg=CONFIG_DQL
+--cfg=CONFIG_DQL="y"
+--cfg=CONFIG_VXLAN
+--cfg=CONFIG_VXLAN="m"
+--cfg=CONFIG_FRAMEBUFFER_CONSOLE
+--cfg=CONFIG_FRAMEBUFFER_CONSOLE="y"
+--cfg=CONFIG_CLK_STARFIVE_JH71X0
+--cfg=CONFIG_CLK_STARFIVE_JH71X0="y"
+--cfg=CONFIG_SOCK_CGROUP_DATA
+--cfg=CONFIG_SOCK_CGROUP_DATA="y"
+--cfg=CONFIG_COREDUMP
+--cfg=CONFIG_COREDUMP="y"
+--cfg=CONFIG_DRM_SUBALLOC_HELPER
+--cfg=CONFIG_DRM_SUBALLOC_HELPER="m"
+--cfg=CONFIG_BCMA_POSSIBLE
+--cfg=CONFIG_BCMA_POSSIBLE="y"
+--cfg=CONFIG_USB_CONFIGFS_RNDIS
+--cfg=CONFIG_USB_CONFIGFS_RNDIS="y"
+--cfg=CONFIG_NF_LOG_IPV4
+--cfg=CONFIG_NF_LOG_IPV4="m"
+--cfg=CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO
+--cfg=CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO="y"
+--cfg=CONFIG_VGA_ARB
+--cfg=CONFIG_VGA_ARB="y"
+--cfg=CONFIG_SATA_HOST
+--cfg=CONFIG_SATA_HOST="y"
+--cfg=CONFIG_SND_SOC
+--cfg=CONFIG_SND_SOC="y"
+--cfg=CONFIG_SCSI_COMMON
+--cfg=CONFIG_SCSI_COMMON="y"
+--cfg=CONFIG_PRINTK
+--cfg=CONFIG_PRINTK="y"
+--cfg=CONFIG_FB_SYS_FILLRECT
+--cfg=CONFIG_FB_SYS_FILLRECT="y"
+--cfg=CONFIG_TIMERFD
+--cfg=CONFIG_TIMERFD="y"
+--cfg=CONFIG_DNS_RESOLVER
+--cfg=CONFIG_DNS_RESOLVER="y"
+--cfg=CONFIG_FIRMWARE_TABLE
+--cfg=CONFIG_FIRMWARE_TABLE="y"
+--cfg=CONFIG_MTD_CFI_I2
+--cfg=CONFIG_MTD_CFI_I2="y"
+--cfg=CONFIG_CRYPTO_AUTHENC
+--cfg=CONFIG_CRYPTO_AUTHENC="m"
+--cfg=CONFIG_RISCV_ISA_C
+--cfg=CONFIG_RISCV_ISA_C="y"
+--cfg=CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+--cfg=CONFIG_ARCH_HAS_SYSCALL_WRAPPER="y"
+--cfg=CONFIG_DWMAC_GENERIC
+--cfg=CONFIG_DWMAC_GENERIC="m"
+--cfg=CONFIG_COMPAT_32BIT_TIME
+--cfg=CONFIG_COMPAT_32BIT_TIME="y"
+--cfg=CONFIG_SECURITY_APPARMOR_HASH
+--cfg=CONFIG_SECURITY_APPARMOR_HASH="y"
+--cfg=CONFIG_SHMEM
+--cfg=CONFIG_SHMEM="y"
+--cfg=CONFIG_MTD
+--cfg=CONFIG_MTD="y"
+--cfg=CONFIG_MIGRATION
+--cfg=CONFIG_MIGRATION="y"
+--cfg=CONFIG_HAVE_ARCH_JUMP_LABEL
+--cfg=CONFIG_HAVE_ARCH_JUMP_LABEL="y"
+--cfg=CONFIG_BUILD_SALT=""
+--cfg=CONFIG_MMC_BLOCK_MINORS="8"
+--cfg=CONFIG_DECOMPRESS_LZMA
+--cfg=CONFIG_DECOMPRESS_LZMA="y"
+--cfg=CONFIG_HAVE_KVM_EVENTFD
+--cfg=CONFIG_HAVE_KVM_EVENTFD="y"
+--cfg=CONFIG_DEVTMPFS_MOUNT
+--cfg=CONFIG_DEVTMPFS_MOUNT="y"
+--cfg=CONFIG_SERIAL_SH_SCI_NR_UARTS="18"
+--cfg=CONFIG_HAVE_PREEMPT_DYNAMIC
+--cfg=CONFIG_HAVE_PREEMPT_DYNAMIC="y"
+--cfg=CONFIG_DNOTIFY
+--cfg=CONFIG_DNOTIFY="y"
+--cfg=CONFIG_ERRATA_THEAD_PMU
+--cfg=CONFIG_ERRATA_THEAD_PMU="y"
+--cfg=CONFIG_INPUT_MOUSEDEV
+--cfg=CONFIG_INPUT_MOUSEDEV="y"
+--cfg=CONFIG_GENERIC_NET_UTILS
+--cfg=CONFIG_GENERIC_NET_UTILS="y"
+--cfg=CONFIG_ATA
+--cfg=CONFIG_ATA="y"
+--cfg=CONFIG_GPIOLIB_FASTPATH_LIMIT="512"
+--cfg=CONFIG_ND_BTT
+--cfg=CONFIG_ND_BTT="y"
+--cfg=CONFIG_NLS_CODEPAGE_437
+--cfg=CONFIG_NLS_CODEPAGE_437="y"
+--cfg=CONFIG_PATA_TIMINGS
+--cfg=CONFIG_PATA_TIMINGS="y"
+--cfg=CONFIG_ARCH_PROC_KCORE_TEXT
+--cfg=CONFIG_ARCH_PROC_KCORE_TEXT="y"
+--cfg=CONFIG_EXPORTFS
+--cfg=CONFIG_EXPORTFS="y"
+--cfg=CONFIG_NET_INGRESS
+--cfg=CONFIG_NET_INGRESS="y"
+--cfg=CONFIG_HAVE_FUNCTION_ERROR_INJECTION
+--cfg=CONFIG_HAVE_FUNCTION_ERROR_INJECTION="y"
+--cfg=CONFIG_SERIO
+--cfg=CONFIG_SERIO="y"
+--cfg=CONFIG_INPUT_MOUSE
+--cfg=CONFIG_INPUT_MOUSE="y"
+--cfg=CONFIG_FB_SYS_IMAGEBLIT
+--cfg=CONFIG_FB_SYS_IMAGEBLIT="y"
+--cfg=CONFIG_SUNRPC_GSS
+--cfg=CONFIG_SUNRPC_GSS="y"
+--cfg=CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+--cfg=CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS="y"
+--cfg=CONFIG_KCMP
+--cfg=CONFIG_KCMP="y"
+--cfg=CONFIG_RTC_INTF_SYSFS
+--cfg=CONFIG_RTC_INTF_SYSFS="y"
+--cfg=CONFIG_GCC_PLUGINS
+--cfg=CONFIG_GCC_PLUGINS="y"
+--cfg=CONFIG_CPU_FREQ_GOV_COMMON
+--cfg=CONFIG_CPU_FREQ_GOV_COMMON="y"
+--cfg=CONFIG_BLK_DEV_INITRD
+--cfg=CONFIG_BLK_DEV_INITRD="y"
+--cfg=CONFIG_MMC_SUNXI
+--cfg=CONFIG_MMC_SUNXI="y"
+--cfg=CONFIG_DM_BUFIO
+--cfg=CONFIG_DM_BUFIO="m"
+--cfg=CONFIG_PCPU_DEV_REFCNT
+--cfg=CONFIG_PCPU_DEV_REFCNT="y"
+--cfg=CONFIG_DRM_VIRTIO_GPU
+--cfg=CONFIG_DRM_VIRTIO_GPU="m"
+--cfg=CONFIG_FB_SYSMEM_HELPERS
+--cfg=CONFIG_FB_SYSMEM_HELPERS="y"
+--cfg=CONFIG_USB_OTG
+--cfg=CONFIG_USB_OTG="y"
+--cfg=CONFIG_PREEMPT_NOTIFIERS
+--cfg=CONFIG_PREEMPT_NOTIFIERS="y"
+--cfg=CONFIG_NF_CT_PROTO_DCCP
+--cfg=CONFIG_NF_CT_PROTO_DCCP="y"
+--cfg=CONFIG_ZLIB_INFLATE
+--cfg=CONFIG_ZLIB_INFLATE="y"
+--cfg=CONFIG_NET_VENDOR_SYNOPSYS
+--cfg=CONFIG_NET_VENDOR_SYNOPSYS="y"
+--cfg=CONFIG_THERMAL_OF
+--cfg=CONFIG_THERMAL_OF="y"
+--cfg=CONFIG_HWMON
+--cfg=CONFIG_HWMON="y"
+--cfg=CONFIG_NET_VENDOR_DLINK
+--cfg=CONFIG_NET_VENDOR_DLINK="y"
+--cfg=CONFIG_AUDITSYSCALL
+--cfg=CONFIG_AUDITSYSCALL="y"
+--cfg=CONFIG_USB_PHY
+--cfg=CONFIG_USB_PHY="y"
+--cfg=CONFIG_IP_PNP
+--cfg=CONFIG_IP_PNP="y"
+--cfg=CONFIG_RISCV_SBI
+--cfg=CONFIG_RISCV_SBI="y"
+--cfg=CONFIG_RTC_INTF_PROC
+--cfg=CONFIG_RTC_INTF_PROC="y"
+--cfg=CONFIG_PM_CLK
+--cfg=CONFIG_PM_CLK="y"
+--cfg=CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
+--cfg=CONFIG_CPU_IDLE_GOV_MENU
+--cfg=CONFIG_CPU_IDLE_GOV_MENU="y"
+--cfg=CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL
+--cfg=CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL="y"
+--cfg=CONFIG_ACPI_FAN
+--cfg=CONFIG_ACPI_FAN="y"
+--cfg=CONFIG_SECURITY_APPARMOR
+--cfg=CONFIG_SECURITY_APPARMOR="y"
+--cfg=CONFIG_STACKTRACE_SUPPORT
+--cfg=CONFIG_STACKTRACE_SUPPORT="y"
+--cfg=CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
+--cfg=CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP="y"
+--cfg=CONFIG_SERIAL_8250_PCILIB
+--cfg=CONFIG_SERIAL_8250_PCILIB="y"
+--cfg=CONFIG_RESET_CONTROLLER
+--cfg=CONFIG_RESET_CONTROLLER="y"
+--cfg=CONFIG_ACPI
+--cfg=CONFIG_ACPI="y"
+--cfg=CONFIG_LOCKD
+--cfg=CONFIG_LOCKD="y"
+--cfg=CONFIG_USB_F_FS
+--cfg=CONFIG_USB_F_FS="m"
+--cfg=CONFIG_PCIE_DW_HOST
+--cfg=CONFIG_PCIE_DW_HOST="y"
+--cfg=CONFIG_CRYPTO_LIB_AES
+--cfg=CONFIG_CRYPTO_LIB_AES="m"
+--cfg=CONFIG_WLAN_VENDOR_RALINK
+--cfg=CONFIG_WLAN_VENDOR_RALINK="y"
+--cfg=CONFIG_CRYPTO_KPP2
+--cfg=CONFIG_CRYPTO_KPP2="y"
+--cfg=CONFIG_NET_VENDOR_MICROCHIP
+--cfg=CONFIG_NET_VENDOR_MICROCHIP="y"
+--cfg=CONFIG_FUNCTION_ALIGNMENT="0"
+--cfg=CONFIG_SOC_RENESAS
+--cfg=CONFIG_SOC_RENESAS="y"
+--cfg=CONFIG_PCI_HOST_GENERIC
+--cfg=CONFIG_PCI_HOST_GENERIC="y"
+--cfg=CONFIG_NET_UDP_TUNNEL
+--cfg=CONFIG_NET_UDP_TUNNEL="m"
+--cfg=CONFIG_RPCSEC_GSS_KRB5
+--cfg=CONFIG_RPCSEC_GSS_KRB5="y"
+--cfg=CONFIG_MTD_CFI_UTIL
+--cfg=CONFIG_MTD_CFI_UTIL="y"
+--cfg=CONFIG_NO_HZ_IDLE
+--cfg=CONFIG_NO_HZ_IDLE="y"
+--cfg=CONFIG_NET_VENDOR_ADAPTEC
+--cfg=CONFIG_NET_VENDOR_ADAPTEC="y"
+--cfg=CONFIG_MOUSE_PS2_BYD
+--cfg=CONFIG_MOUSE_PS2_BYD="y"
+--cfg=CONFIG_SERIAL_SH_SCI
+--cfg=CONFIG_SERIAL_SH_SCI="y"
+--cfg=CONFIG_SOCK_RX_QUEUE_MAPPING
+--cfg=CONFIG_SOCK_RX_QUEUE_MAPPING="y"
+--cfg=CONFIG_CRYPTO_DRBG_HMAC
+--cfg=CONFIG_CRYPTO_DRBG_HMAC="y"
+--cfg=CONFIG_DRM_BRIDGE
+--cfg=CONFIG_DRM_BRIDGE="y"
+--cfg=CONFIG_FB_DEVICE
+--cfg=CONFIG_FB_DEVICE="y"
+--cfg=CONFIG_USB_F_SERIAL
+--cfg=CONFIG_USB_F_SERIAL="m"
+--cfg=CONFIG_NET_VENDOR_SILAN
+--cfg=CONFIG_NET_VENDOR_SILAN="y"
+--cfg=CONFIG_PHY_RCAR_GEN3_USB2
+--cfg=CONFIG_PHY_RCAR_GEN3_USB2="y"
+--cfg=CONFIG_PINCTRL_STARFIVE_JH7110_AON
+--cfg=CONFIG_PINCTRL_STARFIVE_JH7110_AON="y"
+--cfg=CONFIG_RISCV_TIMER
+--cfg=CONFIG_RISCV_TIMER="y"
+--cfg=CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK
+--cfg=CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK="y"
+--cfg=CONFIG_USB_STORAGE
+--cfg=CONFIG_USB_STORAGE="y"
+--cfg=CONFIG_NET_VENDOR_BROADCOM
+--cfg=CONFIG_NET_VENDOR_BROADCOM="y"
+--cfg=CONFIG_FPU
+--cfg=CONFIG_FPU="y"
+--cfg=CONFIG_STANDALONE
+--cfg=CONFIG_STANDALONE="y"
+--cfg=CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+--cfg=CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE="y"
+--cfg=CONFIG_CPU_FREQ_GOV_PERFORMANCE
+--cfg=CONFIG_CPU_FREQ_GOV_PERFORMANCE="y"
+--cfg=CONFIG_EFI
+--cfg=CONFIG_EFI="y"
+--cfg=CONFIG_RATIONAL
+--cfg=CONFIG_RATIONAL="y"
+--cfg=CONFIG_WLAN_VENDOR_INTEL
+--cfg=CONFIG_WLAN_VENDOR_INTEL="y"
+--cfg=CONFIG_WATCHDOG_CORE
+--cfg=CONFIG_WATCHDOG_CORE="y"
+--cfg=CONFIG_GENERIC_EARLY_IOREMAP
+--cfg=CONFIG_GENERIC_EARLY_IOREMAP="y"
+--cfg=CONFIG_NET_VENDOR_WANGXUN
+--cfg=CONFIG_NET_VENDOR_WANGXUN="y"
+--cfg=CONFIG_NET_L3_MASTER_DEV
+--cfg=CONFIG_NET_L3_MASTER_DEV="y"
+--cfg=CONFIG_VMAP_STACK
+--cfg=CONFIG_VMAP_STACK="y"
+--cfg=CONFIG_BLOCK
+--cfg=CONFIG_BLOCK="y"
+--cfg=CONFIG_ARCH_STACKWALK
+--cfg=CONFIG_ARCH_STACKWALK="y"
+--cfg=CONFIG_INIT_ENV_ARG_LIMIT="32"
+--cfg=CONFIG_ROOT_NFS
+--cfg=CONFIG_ROOT_NFS="y"
+--cfg=CONFIG_AF_UNIX_OOB
+--cfg=CONFIG_AF_UNIX_OOB="y"
+--cfg=CONFIG_USER_NS
+--cfg=CONFIG_USER_NS="y"
+--cfg=CONFIG_TMPFS_POSIX_ACL
+--cfg=CONFIG_TMPFS_POSIX_ACL="y"
+--cfg=CONFIG_STRICT_KERNEL_RWX
+--cfg=CONFIG_STRICT_KERNEL_RWX="y"
+--cfg=CONFIG_NETLINK_DIAG
+--cfg=CONFIG_NETLINK_DIAG="y"
+--cfg=CONFIG_BUG
+--cfg=CONFIG_BUG="y"
+--cfg=CONFIG_ARCH_HAS_DEBUG_WX
+--cfg=CONFIG_ARCH_HAS_DEBUG_WX="y"
+--cfg=CONFIG_PCIE_XILINX
+--cfg=CONFIG_PCIE_XILINX="y"
+--cfg=CONFIG_PM
+--cfg=CONFIG_PM="y"
+--cfg=CONFIG_MEMCG
+--cfg=CONFIG_MEMCG="y"
+--cfg=CONFIG_SPI
+--cfg=CONFIG_SPI="y"
+--cfg=CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
+--cfg=CONFIG_NOUVEAU_DEBUG_DEFAULT="3"
+--cfg=CONFIG_OF_IRQ
+--cfg=CONFIG_OF_IRQ="y"
+--cfg=CONFIG_LIBFDT
+--cfg=CONFIG_LIBFDT="y"
+--cfg=CONFIG_NET_FAILOVER
+--cfg=CONFIG_NET_FAILOVER="y"
+--cfg=CONFIG_WLAN_VENDOR_PURELIFI
+--cfg=CONFIG_WLAN_VENDOR_PURELIFI="y"
+--cfg=CONFIG_IO_URING
+--cfg=CONFIG_IO_URING="y"
+--cfg=CONFIG_VT
+--cfg=CONFIG_VT="y"
+--cfg=CONFIG_STARFIVE_WATCHDOG
+--cfg=CONFIG_STARFIVE_WATCHDOG="y"
+--cfg=CONFIG_SECRETMEM
+--cfg=CONFIG_SECRETMEM="y"
+--cfg=CONFIG_DTC
+--cfg=CONFIG_DTC="y"
+--cfg=CONFIG_MACB
+--cfg=CONFIG_MACB="y"
+--cfg=CONFIG_REGMAP_SPI
+--cfg=CONFIG_REGMAP_SPI="y"
+--cfg=CONFIG_DMA_ACPI
+--cfg=CONFIG_DMA_ACPI="y"
+--cfg=CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED
+--cfg=CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED="y"
+--cfg=CONFIG_RESET_STARFIVE_JH71X0
+--cfg=CONFIG_RESET_STARFIVE_JH71X0="y"
+--cfg=CONFIG_SPLIT_PTLOCK_CPUS="4"
+--cfg=CONFIG_SBITMAP
+--cfg=CONFIG_SBITMAP="y"
+--cfg=CONFIG_MCHP_CLK_MPFS
+--cfg=CONFIG_MCHP_CLK_MPFS="y"
+--cfg=CONFIG_POWER_SUPPLY
+--cfg=CONFIG_POWER_SUPPLY="y"
+--cfg=CONFIG_DM_PERSISTENT_DATA
+--cfg=CONFIG_DM_PERSISTENT_DATA="m"
+--cfg=CONFIG_CRYPTO_SKCIPHER2
+--cfg=CONFIG_CRYPTO_SKCIPHER2="y"
+--cfg=CONFIG_NLS
+--cfg=CONFIG_NLS="y"
+--cfg=CONFIG_AS_IS_GNU
+--cfg=CONFIG_AS_IS_GNU="y"
+--cfg=CONFIG_MICROSEMI_PHY
+--cfg=CONFIG_MICROSEMI_PHY="y"
+--cfg=CONFIG_USB_CONFIGFS_ACM
+--cfg=CONFIG_USB_CONFIGFS_ACM="y"
+--cfg=CONFIG_CGROUP_BPF
+--cfg=CONFIG_CGROUP_BPF="y"
+--cfg=CONFIG_CPU_THERMAL
+--cfg=CONFIG_CPU_THERMAL="y"
+--cfg=CONFIG_IRQ_WORK
+--cfg=CONFIG_IRQ_WORK="y"
+--cfg=CONFIG_PCI_MSI
+--cfg=CONFIG_PCI_MSI="y"
+--cfg=CONFIG_IP_ADVANCED_ROUTER
+--cfg=CONFIG_IP_ADVANCED_ROUTER="y"
+--cfg=CONFIG_FB_SYS_COPYAREA
+--cfg=CONFIG_FB_SYS_COPYAREA="y"
+--cfg=CONFIG_USB_EHCI_PCI
+--cfg=CONFIG_USB_EHCI_PCI="y"
+--cfg=CONFIG_SPARSEMEM_EXTREME
+--cfg=CONFIG_SPARSEMEM_EXTREME="y"
+--cfg=CONFIG_USB_COMMON
+--cfg=CONFIG_USB_COMMON="y"
+--cfg=CONFIG_DRM_DISPLAY_HDMI_HELPER
+--cfg=CONFIG_DRM_DISPLAY_HDMI_HELPER="y"
+--cfg=CONFIG_IP6_NF_IPTABLES
+--cfg=CONFIG_IP6_NF_IPTABLES="m"
+--cfg=CONFIG_DRM_GPUVM
+--cfg=CONFIG_DRM_GPUVM="m"
+--cfg=CONFIG_VIRTIO_ANCHOR
+--cfg=CONFIG_VIRTIO_ANCHOR="y"
+--cfg=CONFIG_DEBUG_INFO_NONE
+--cfg=CONFIG_DEBUG_INFO_NONE="y"
+--cfg=CONFIG_FIXED_PHY
+--cfg=CONFIG_FIXED_PHY="y"
+--cfg=CONFIG_SOC_SIFIVE
+--cfg=CONFIG_SOC_SIFIVE="y"
+--cfg=CONFIG_GPIO_ACPI
+--cfg=CONFIG_GPIO_ACPI="y"
+--cfg=CONFIG_CPU_FREQ_GOV_USERSPACE
+--cfg=CONFIG_CPU_FREQ_GOV_USERSPACE="y"
+--cfg=CONFIG_LOG_CPU_MAX_BUF_SHIFT="12"
+--cfg=CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE="32"
+--cfg=CONFIG_BLK_DEV_DM_BUILTIN
+--cfg=CONFIG_BLK_DEV_DM_BUILTIN="y"
+--cfg=CONFIG_DRM_SUN8I_TCON_TOP
+--cfg=CONFIG_DRM_SUN8I_TCON_TOP="m"
+--cfg=CONFIG_VGA_ARB_MAX_GPUS="16"
+--cfg=CONFIG_GENERIC_PINCONF
+--cfg=CONFIG_GENERIC_PINCONF="y"
+--cfg=CONFIG_DEBUG_SG
+--cfg=CONFIG_DEBUG_SG="y"
+--cfg=CONFIG_NFS_V4_2_SSC_HELPER
+--cfg=CONFIG_NFS_V4_2_SSC_HELPER="y"
+--cfg=CONFIG_ARCH_HAS_BINFMT_FLAT
+--cfg=CONFIG_ARCH_HAS_BINFMT_FLAT="y"
+--cfg=CONFIG_SG_POOL
+--cfg=CONFIG_SG_POOL="y"
+--cfg=CONFIG_DRM_KMS_HELPER
+--cfg=CONFIG_DRM_KMS_HELPER="m"
+--cfg=CONFIG_NET_VENDOR_PACKET_ENGINES
+--cfg=CONFIG_NET_VENDOR_PACKET_ENGINES="y"
+--cfg=CONFIG_DMA_COHERENT_POOL
+--cfg=CONFIG_DMA_COHERENT_POOL="y"
+--cfg=CONFIG_TOOLCHAIN_HAS_ZBB
+--cfg=CONFIG_TOOLCHAIN_HAS_ZBB="y"
+--cfg=CONFIG_BLK_MQ_PCI
+--cfg=CONFIG_BLK_MQ_PCI="y"
+--cfg=CONFIG_CPU_FREQ_THERMAL
+--cfg=CONFIG_CPU_FREQ_THERMAL="y"
+--cfg=CONFIG_RISCV_PMU_LEGACY
+--cfg=CONFIG_RISCV_PMU_LEGACY="y"
+--cfg=CONFIG_NLS_ISO8859_1
+--cfg=CONFIG_NLS_ISO8859_1="m"
+--cfg=CONFIG_R8169
+--cfg=CONFIG_R8169="y"
+--cfg=CONFIG_MMC_SDHI
+--cfg=CONFIG_MMC_SDHI="y"
+--cfg=CONFIG_USB_EHCI_HCD
+--cfg=CONFIG_USB_EHCI_HCD="y"
+--cfg=CONFIG_FS_IOMAP
+--cfg=CONFIG_FS_IOMAP="y"
+--cfg=CONFIG_CAN_CALC_BITTIMING
+--cfg=CONFIG_CAN_CALC_BITTIMING="y"
+--cfg=CONFIG_COMPACT_UNEVICTABLE_DEFAULT="1"
+--cfg=CONFIG_RD_ZSTD
+--cfg=CONFIG_RD_ZSTD="y"
+--cfg=CONFIG_I2C_RIIC
+--cfg=CONFIG_I2C_RIIC="m"
+--cfg=CONFIG_NETDEVICES
+--cfg=CONFIG_NETDEVICES="y"
+--cfg=CONFIG_ARCH_HAS_KCOV
+--cfg=CONFIG_ARCH_HAS_KCOV="y"
+--cfg=CONFIG_CGROUP_FREEZER
+--cfg=CONFIG_CGROUP_FREEZER="y"
+--cfg=CONFIG_SPI_SUN6I
+--cfg=CONFIG_SPI_SUN6I="y"
+--cfg=CONFIG_EVENTFD
+--cfg=CONFIG_EVENTFD="y"
+--cfg=CONFIG_PHY_SUN4I_USB
+--cfg=CONFIG_PHY_SUN4I_USB="m"
+--cfg=CONFIG_DEBUG_RWSEMS
+--cfg=CONFIG_DEBUG_RWSEMS="y"
+--cfg=CONFIG_FS_POSIX_ACL
+--cfg=CONFIG_FS_POSIX_ACL="y"
+--cfg=CONFIG_IPV6_SIT
+--cfg=CONFIG_IPV6_SIT="y"
+--cfg=CONFIG_XFRM
+--cfg=CONFIG_XFRM="y"
+--cfg=CONFIG_ARCH_HAS_PMEM_API
+--cfg=CONFIG_ARCH_HAS_PMEM_API="y"
+--cfg=CONFIG_LINEAR_RANGES
+--cfg=CONFIG_LINEAR_RANGES="y"
+--cfg=CONFIG_HAVE_KPROBES_ON_FTRACE
+--cfg=CONFIG_HAVE_KPROBES_ON_FTRACE="y"
+--cfg=CONFIG_SERIAL_8250_CONSOLE
+--cfg=CONFIG_SERIAL_8250_CONSOLE="y"
+--cfg=CONFIG_CRYPTO_GENIV
+--cfg=CONFIG_CRYPTO_GENIV="m"
+--cfg=CONFIG_JUMP_LABEL
+--cfg=CONFIG_JUMP_LABEL="y"
+--cfg=CONFIG_OVERLAY_FS
+--cfg=CONFIG_OVERLAY_FS="m"
+--cfg=CONFIG_IP_NF_TARGET_MASQUERADE
+--cfg=CONFIG_IP_NF_TARGET_MASQUERADE="m"
+--cfg=CONFIG_HAVE_EBPF_JIT
+--cfg=CONFIG_HAVE_EBPF_JIT="y"
+--cfg=CONFIG_PROC_PAGE_MONITOR
+--cfg=CONFIG_PROC_PAGE_MONITOR="y"
+--cfg=CONFIG_USB_CONFIGFS_ECM
+--cfg=CONFIG_USB_CONFIGFS_ECM="y"
+--cfg=CONFIG_NETFILTER_XT_TARGET_MASQUERADE
+--cfg=CONFIG_NETFILTER_XT_TARGET_MASQUERADE="m"
+--cfg=CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+--cfg=CONFIG_MTD_SPI_NOR_USE_4K_SECTORS="y"
+--cfg=CONFIG_BPF
+--cfg=CONFIG_BPF="y"
+--cfg=CONFIG_DWMAC_SUN8I
+--cfg=CONFIG_DWMAC_SUN8I="m"
+--cfg=CONFIG_RD_LZO
+--cfg=CONFIG_RD_LZO="y"
+--cfg=CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
+--cfg=CONFIG_HAVE_FUNCTION_GRAPH_RETVAL="y"
+--cfg=CONFIG_CRYPTO_SHA512
+--cfg=CONFIG_CRYPTO_SHA512="m"
+--cfg=CONFIG_BACKLIGHT_CLASS_DEVICE
+--cfg=CONFIG_BACKLIGHT_CLASS_DEVICE="m"
+--cfg=CONFIG_CC_HAS_ASM_INLINE
+--cfg=CONFIG_CC_HAS_ASM_INLINE="y"
+--cfg=CONFIG_MTD_OF_PARTS
+--cfg=CONFIG_MTD_OF_PARTS="y"
+--cfg=CONFIG_CRYPTO_NULL
+--cfg=CONFIG_CRYPTO_NULL="m"
+--cfg=CONFIG_GPIO_CDEV_V1
+--cfg=CONFIG_GPIO_CDEV_V1="y"
+--cfg=CONFIG_NET_VENDOR_SEEQ
+--cfg=CONFIG_NET_VENDOR_SEEQ="y"
+--cfg=CONFIG_NF_DEFRAG_IPV4
+--cfg=CONFIG_NF_DEFRAG_IPV4="m"
+--cfg=CONFIG_SELECT_MEMORY_MODEL
+--cfg=CONFIG_SELECT_MEMORY_MODEL="y"
+--cfg=CONFIG_VIRTIO_CONSOLE
+--cfg=CONFIG_VIRTIO_CONSOLE="y"
+--cfg=CONFIG_NETFILTER_ADVANCED
+--cfg=CONFIG_NETFILTER_ADVANCED="y"
+--cfg=CONFIG_GENERIC_STRNLEN_USER
+--cfg=CONFIG_GENERIC_STRNLEN_USER="y"
+--cfg=CONFIG_MTD_CFI
+--cfg=CONFIG_MTD_CFI="y"
+--cfg=CONFIG_RPMSG_VIRTIO
+--cfg=CONFIG_RPMSG_VIRTIO="y"
+--cfg=CONFIG_WLAN_VENDOR_RSI
+--cfg=CONFIG_WLAN_VENDOR_RSI="y"
+--cfg=CONFIG_CRYPTO_JITTERENTROPY
+--cfg=CONFIG_CRYPTO_JITTERENTROPY="m"
+--cfg=CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+--cfg=CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION="y"
+--cfg=CONFIG_CRYPTO_GCM
+--cfg=CONFIG_CRYPTO_GCM="m"
+--cfg=CONFIG_HAVE_DYNAMIC_FTRACE
+--cfg=CONFIG_HAVE_DYNAMIC_FTRACE="y"
+--cfg=CONFIG_CDROM
+--cfg=CONFIG_CDROM="y"
+--cfg=CONFIG_USB_CONFIGFS_F_FS
+--cfg=CONFIG_USB_CONFIGFS_F_FS="y"
+--cfg=CONFIG_PNFS_BLOCK
+--cfg=CONFIG_PNFS_BLOCK="m"
+--cfg=CONFIG_NET_VENDOR_RDC
+--cfg=CONFIG_NET_VENDOR_RDC="y"
+--cfg=CONFIG_PGTABLE_LEVELS="5"
+--cfg=CONFIG_POWER_RESET_SYSCON
+--cfg=CONFIG_POWER_RESET_SYSCON="y"
+--cfg=CONFIG_CPUSETS
+--cfg=CONFIG_CPUSETS="y"
+--cfg=CONFIG_ARCH_HAS_VDSO_DATA
+--cfg=CONFIG_ARCH_HAS_VDSO_DATA="y"
+--cfg=CONFIG_SPARSE_IRQ
+--cfg=CONFIG_SPARSE_IRQ="y"
+--cfg=CONFIG_IP_NF_MANGLE
+--cfg=CONFIG_IP_NF_MANGLE="m"
+--cfg=CONFIG_DT_IDLE_STATES
+--cfg=CONFIG_DT_IDLE_STATES="y"
+--cfg=CONFIG_SECURITYFS
+--cfg=CONFIG_SECURITYFS="y"
+--cfg=CONFIG_RCU_STALL_COMMON
+--cfg=CONFIG_RCU_STALL_COMMON="y"
+--cfg=CONFIG_PCIEPORTBUS
+--cfg=CONFIG_PCIEPORTBUS="y"
+--cfg=CONFIG_DEBUG_BUGVERBOSE
+--cfg=CONFIG_DEBUG_BUGVERBOSE="y"
+--cfg=CONFIG_EFI_GENERIC_STUB
+--cfg=CONFIG_EFI_GENERIC_STUB="y"
+--cfg=CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+--cfg=CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK="y"
+--cfg=CONFIG_IP_NF_FILTER
+--cfg=CONFIG_IP_NF_FILTER="m"
+--cfg=CONFIG_MODULES_TREE_LOOKUP
+--cfg=CONFIG_MODULES_TREE_LOOKUP="y"
+--cfg=CONFIG_FAT_FS
+--cfg=CONFIG_FAT_FS="y"
+--cfg=CONFIG_BUILDTIME_TABLE_SORT
+--cfg=CONFIG_BUILDTIME_TABLE_SORT="y"
+--cfg=CONFIG_NVMEM
+--cfg=CONFIG_NVMEM="y"
+--cfg=CONFIG_INET_TUNNEL
+--cfg=CONFIG_INET_TUNNEL="y"
+--cfg=CONFIG_NF_LOG_ARP
+--cfg=CONFIG_NF_LOG_ARP="m"
+--cfg=CONFIG_NET_9P_VIRTIO
+--cfg=CONFIG_NET_9P_VIRTIO="y"
+--cfg=CONFIG_PINCONF
+--cfg=CONFIG_PINCONF="y"
+--cfg=CONFIG_BLOCK_HOLDER_DEPRECATED
+--cfg=CONFIG_BLOCK_HOLDER_DEPRECATED="y"
+--cfg=CONFIG_GENERIC_CLOCKEVENTS
+--cfg=CONFIG_GENERIC_CLOCKEVENTS="y"
+--cfg=CONFIG_OID_REGISTRY
+--cfg=CONFIG_OID_REGISTRY="y"
+--cfg=CONFIG_DWMAC_STARFIVE
+--cfg=CONFIG_DWMAC_STARFIVE="m"
+--cfg=CONFIG_CAN_NETLINK
+--cfg=CONFIG_CAN_NETLINK="y"
+--cfg=CONFIG_CONSOLE_TRANSLATIONS
+--cfg=CONFIG_CONSOLE_TRANSLATIONS="y"
+--cfg=CONFIG_ARCH_SUPPORTS_ATOMIC_RMW
+--cfg=CONFIG_ARCH_SUPPORTS_ATOMIC_RMW="y"
+--cfg=CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED
+--cfg=CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED="y"
+--cfg=CONFIG_RAID6_PQ
+--cfg=CONFIG_RAID6_PQ="m"
+--cfg=CONFIG_SUN6I_RTC_CCU
+--cfg=CONFIG_SUN6I_RTC_CCU="y"
+--cfg=CONFIG_JH71XX_PMU
+--cfg=CONFIG_JH71XX_PMU="y"
+--cfg=CONFIG_SERIAL_EARLYCON
+--cfg=CONFIG_SERIAL_EARLYCON="y"
+--cfg=CONFIG_ARCH_THEAD
+--cfg=CONFIG_ARCH_THEAD="y"
+--cfg=CONFIG_NET_VENDOR_NI
+--cfg=CONFIG_NET_VENDOR_NI="y"
+--cfg=CONFIG_CRYPTO_AKCIPHER
+--cfg=CONFIG_CRYPTO_AKCIPHER="y"
+--cfg=CONFIG_MMIOWB
+--cfg=CONFIG_MMIOWB="y"
+--cfg=CONFIG_ETHTOOL_NETLINK
+--cfg=CONFIG_ETHTOOL_NETLINK="y"
+--cfg=CONFIG_CPU_FREQ
+--cfg=CONFIG_CPU_FREQ="y"
+--cfg=CONFIG_USB_OHCI_HCD
+--cfg=CONFIG_USB_OHCI_HCD="y"
+--cfg=CONFIG_ARCH_SUPPORTS_KEXEC_FILE
+--cfg=CONFIG_ARCH_SUPPORTS_KEXEC_FILE="y"
+--cfg=CONFIG_DUMMY_CONSOLE
+--cfg=CONFIG_DUMMY_CONSOLE="y"
+--cfg=CONFIG_USB_PCI
+--cfg=CONFIG_USB_PCI="y"
+--cfg=CONFIG_NF_REJECT_IPV4
+--cfg=CONFIG_NF_REJECT_IPV4="m"
+--cfg=CONFIG_GENERIC_IOREMAP
+--cfg=CONFIG_GENERIC_IOREMAP="y"
+--cfg=CONFIG_ARCH_MMAP_RND_BITS_MAX="24"
+--cfg=CONFIG_MMC_DW_STARFIVE
+--cfg=CONFIG_MMC_DW_STARFIVE="y"
+--cfg=CONFIG_GPIO_SIFIVE
+--cfg=CONFIG_GPIO_SIFIVE="y"
+--cfg=CONFIG_NVMEM_SUNXI_SID
+--cfg=CONFIG_NVMEM_SUNXI_SID="y"
+--cfg=CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE
+--cfg=CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE="y"
+--cfg=CONFIG_TRACE_IRQFLAGS_SUPPORT
+--cfg=CONFIG_TRACE_IRQFLAGS_SUPPORT="y"
+--cfg=CONFIG_MFD_SYSCON
+--cfg=CONFIG_MFD_SYSCON="y"
+--cfg=CONFIG_DETECT_HUNG_TASK
+--cfg=CONFIG_DETECT_HUNG_TASK="y"
+--cfg=CONFIG_PCIE_BUS_DEFAULT
+--cfg=CONFIG_PCIE_BUS_DEFAULT="y"
+--cfg=CONFIG_CRYPTO_RNG
+--cfg=CONFIG_CRYPTO_RNG="m"
+--cfg=CONFIG_DRM_TTM_HELPER
+--cfg=CONFIG_DRM_TTM_HELPER="m"
+--cfg=CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN="8"
+--cfg=CONFIG_CRYPTO_LIB_UTILS
+--cfg=CONFIG_CRYPTO_LIB_UTILS="y"
+--cfg=CONFIG_SND_USB
+--cfg=CONFIG_SND_USB="y"
+--cfg=CONFIG_RD_GZIP
+--cfg=CONFIG_RD_GZIP="y"
+--cfg=CONFIG_HAVE_REGS_AND_STACK_ACCESS_API
+--cfg=CONFIG_HAVE_REGS_AND_STACK_ACCESS_API="y"
+--cfg=CONFIG_BLK_PM
+--cfg=CONFIG_BLK_PM="y"
+--cfg=CONFIG_SECURITY_APPARMOR_PARANOID_LOAD
+--cfg=CONFIG_SECURITY_APPARMOR_PARANOID_LOAD="y"
+--cfg=CONFIG_MDIO_BUS
+--cfg=CONFIG_MDIO_BUS="y"
+--cfg=CONFIG_TREE_RCU
+--cfg=CONFIG_TREE_RCU="y"
+--cfg=CONFIG_ALLOW_DEV_COREDUMP
+--cfg=CONFIG_ALLOW_DEV_COREDUMP="y"
+--cfg=CONFIG_SUNXI_CCU
+--cfg=CONFIG_SUNXI_CCU="y"
+--cfg=CONFIG_SWIOTLB
+--cfg=CONFIG_SWIOTLB="y"
+--cfg=CONFIG_EXT4_FS_SECURITY
+--cfg=CONFIG_EXT4_FS_SECURITY="y"
+--cfg=CONFIG_GRO_CELLS
+--cfg=CONFIG_GRO_CELLS="y"
+--cfg=CONFIG_SECURITY_APPARMOR_HASH_DEFAULT
+--cfg=CONFIG_SECURITY_APPARMOR_HASH_DEFAULT="y"
+--cfg=CONFIG_RISCV_INTC
+--cfg=CONFIG_RISCV_INTC="y"
+--cfg=CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN
+--cfg=CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN="y"
+--cfg=CONFIG_ATA_ACPI
+--cfg=CONFIG_ATA_ACPI="y"
+--cfg=CONFIG_NET_VENDOR_CORTINA
+--cfg=CONFIG_NET_VENDOR_CORTINA="y"
+--cfg=CONFIG_ELFCORE
+--cfg=CONFIG_ELFCORE="y"
+--cfg=CONFIG_WQ_WATCHDOG
+--cfg=CONFIG_WQ_WATCHDOG="y"
+--cfg=CONFIG_BINFMT_ELF
+--cfg=CONFIG_BINFMT_ELF="y"
+--cfg=CONFIG_AUDIT_GENERIC
+--cfg=CONFIG_AUDIT_GENERIC="y"
+--cfg=CONFIG_SOC_MICROCHIP_POLARFIRE
+--cfg=CONFIG_SOC_MICROCHIP_POLARFIRE="y"
+--cfg=CONFIG_SCSI_PROC_FS
+--cfg=CONFIG_SCSI_PROC_FS="y"
+--cfg=CONFIG_I2C_HID
+--cfg=CONFIG_I2C_HID="m"
+--cfg=CONFIG_HAVE_PERF_REGS
+--cfg=CONFIG_HAVE_PERF_REGS="y"
+--cfg=CONFIG_HAVE_KVM_MSI
+--cfg=CONFIG_HAVE_KVM_MSI="y"
+--cfg=CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+--cfg=CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT="y"
+--cfg=CONFIG_NFS_V4_SECURITY_LABEL
+--cfg=CONFIG_NFS_V4_SECURITY_LABEL="y"
+--cfg=CONFIG_NET_VENDOR_ALTEON
+--cfg=CONFIG_NET_VENDOR_ALTEON="y"
+--cfg=CONFIG_REGULATOR_FIXED_VOLTAGE
+--cfg=CONFIG_REGULATOR_FIXED_VOLTAGE="y"
+--cfg=CONFIG_NET_VENDOR_RENESAS
+--cfg=CONFIG_NET_VENDOR_RENESAS="y"
+--cfg=CONFIG_KEYS
+--cfg=CONFIG_KEYS="y"
+--cfg=CONFIG_ERRATA_THEAD_PBMT
+--cfg=CONFIG_ERRATA_THEAD_PBMT="y"
+--cfg=CONFIG_DEBUG_MUTEXES
+--cfg=CONFIG_DEBUG_MUTEXES="y"
+--cfg=CONFIG_NETFILTER_XT_MARK
+--cfg=CONFIG_NETFILTER_XT_MARK="m"
+--cfg=CONFIG_NETFILTER_XTABLES
+--cfg=CONFIG_NETFILTER_XTABLES="m"
+--cfg=CONFIG_DRM_PANEL_ORIENTATION_QUIRKS
+--cfg=CONFIG_DRM_PANEL_ORIENTATION_QUIRKS="m"
+--cfg=CONFIG_SOFTLOCKUP_DETECTOR
+--cfg=CONFIG_SOFTLOCKUP_DETECTOR="y"
+--cfg=CONFIG_CRYPTO_ECHAINIV
+--cfg=CONFIG_CRYPTO_ECHAINIV="m"
+--cfg=CONFIG_HAVE_ARCH_AUDITSYSCALL
+--cfg=CONFIG_HAVE_ARCH_AUDITSYSCALL="y"
+--cfg=CONFIG_RTC_DRV_GOLDFISH
+--cfg=CONFIG_RTC_DRV_GOLDFISH="y"
+--cfg=CONFIG_LEGACY_TIOCSTI
+--cfg=CONFIG_LEGACY_TIOCSTI="y"
+--cfg=CONFIG_CRYPTO_USER_API_HASH
+--cfg=CONFIG_CRYPTO_USER_API_HASH="y"
+--cfg=CONFIG_MTD_CFI_ADV_OPTIONS
+--cfg=CONFIG_MTD_CFI_ADV_OPTIONS="y"
+--cfg=CONFIG_PM_SLEEP_SMP
+--cfg=CONFIG_PM_SLEEP_SMP="y"
+--cfg=CONFIG_CRYPTO_HW
+--cfg=CONFIG_CRYPTO_HW="y"
+--cfg=CONFIG_CLK_ANALOGBITS_WRPLL_CLN28HPC
+--cfg=CONFIG_CLK_ANALOGBITS_WRPLL_CLN28HPC="y"
+--cfg=CONFIG_ACPI_AC
+--cfg=CONFIG_ACPI_AC="y"
+--cfg=CONFIG_HARDIRQS_SW_RESEND
+--cfg=CONFIG_HARDIRQS_SW_RESEND="y"
+--cfg=CONFIG_ARCH_MICROCHIP_POLARFIRE
+--cfg=CONFIG_ARCH_MICROCHIP_POLARFIRE="y"
+--cfg=CONFIG_SPI_MASTER
+--cfg=CONFIG_SPI_MASTER="y"
+--cfg=CONFIG_IRQ_STACKS
+--cfg=CONFIG_IRQ_STACKS="y"
+--cfg=CONFIG_VT_HW_CONSOLE_BINDING
+--cfg=CONFIG_VT_HW_CONSOLE_BINDING="y"
+--cfg=CONFIG_THERMAL_HWMON
+--cfg=CONFIG_THERMAL_HWMON="y"
+--cfg=CONFIG_CRYPTO_SKCIPHER
+--cfg=CONFIG_CRYPTO_SKCIPHER="y"
+--cfg=CONFIG_XZ_DEC_X86
+--cfg=CONFIG_XZ_DEC_X86="y"
+--cfg=CONFIG_SERIAL_OF_PLATFORM
+--cfg=CONFIG_SERIAL_OF_PLATFORM="y"
+--cfg=CONFIG_SERIAL_8250_DW
+--cfg=CONFIG_SERIAL_8250_DW="y"
+--cfg=CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS="0"
+--cfg=CONFIG_CONSOLE_LOGLEVEL_QUIET="4"
+--cfg=CONFIG_NVME_CORE
+--cfg=CONFIG_NVME_CORE="m"
+--cfg=CONFIG_CRC16
+--cfg=CONFIG_CRC16="y"
+--cfg=CONFIG_GENERIC_CALIBRATE_DELAY
+--cfg=CONFIG_GENERIC_CALIBRATE_DELAY="y"
+--cfg=CONFIG_NET_CLS
+--cfg=CONFIG_NET_CLS="y"
+--cfg=CONFIG_TMPFS
+--cfg=CONFIG_TMPFS="y"
+--cfg=CONFIG_NET_VENDOR_NETERION
+--cfg=CONFIG_NET_VENDOR_NETERION="y"
+--cfg=CONFIG_RANDSTRUCT_NONE
+--cfg=CONFIG_RANDSTRUCT_NONE="y"
+--cfg=CONFIG_USB_OHCI_HCD_PLATFORM
+--cfg=CONFIG_USB_OHCI_HCD_PLATFORM="y"
+--cfg=CONFIG_FUTEX
+--cfg=CONFIG_FUTEX="y"
+--cfg=CONFIG_IP_VS_MH_TAB_INDEX="12"
+--cfg=CONFIG_IP_PNP_DHCP
+--cfg=CONFIG_IP_PNP_DHCP="y"
+--cfg=CONFIG_GENERIC_PHY_MIPI_DPHY
+--cfg=CONFIG_GENERIC_PHY_MIPI_DPHY="y"
+--cfg=CONFIG_VIRTIO_PCI
+--cfg=CONFIG_VIRTIO_PCI="y"
+--cfg=CONFIG_UNIX_SCM
+--cfg=CONFIG_UNIX_SCM="y"
+--cfg=CONFIG_MMC_SPI
+--cfg=CONFIG_MMC_SPI="y"
+--cfg=CONFIG_CONSOLE_LOGLEVEL_DEFAULT="7"
+--cfg=CONFIG_REGMAP_I2C
+--cfg=CONFIG_REGMAP_I2C="m"
+--cfg=CONFIG_GENERIC_SCHED_CLOCK
+--cfg=CONFIG_GENERIC_SCHED_CLOCK="y"
+--cfg=CONFIG_NET_VENDOR_REALTEK
+--cfg=CONFIG_NET_VENDOR_REALTEK="y"
+--cfg=CONFIG_CRYPTO_USER_API
+--cfg=CONFIG_CRYPTO_USER_API="y"
+--cfg=CONFIG_RTC_HCTOSYS
+--cfg=CONFIG_RTC_HCTOSYS="y"
+--cfg=CONFIG_SECURITY_NETWORK
+--cfg=CONFIG_SECURITY_NETWORK="y"
+--cfg=CONFIG_SERIAL_CORE_CONSOLE
+--cfg=CONFIG_SERIAL_CORE_CONSOLE="y"
+--cfg=CONFIG_HUGETLB_PAGE
+--cfg=CONFIG_HUGETLB_PAGE="y"
+--cfg=CONFIG_NET_VENDOR_EMULEX
+--cfg=CONFIG_NET_VENDOR_EMULEX="y"
+--cfg=CONFIG_USB_HID
+--cfg=CONFIG_USB_HID="y"
+--cfg=CONFIG_LD_ORPHAN_WARN_LEVEL="warn"
+--cfg=CONFIG_SLUB_DEBUG
+--cfg=CONFIG_SLUB_DEBUG="y"
+--cfg=CONFIG_UCS2_STRING
+--cfg=CONFIG_UCS2_STRING="y"
+--cfg=CONFIG_SND_SOC_WM8978
+--cfg=CONFIG_SND_SOC_WM8978="m"
+--cfg=CONFIG_USB_F_RNDIS
+--cfg=CONFIG_USB_F_RNDIS="m"
+--cfg=CONFIG_DMADEVICES
+--cfg=CONFIG_DMADEVICES="y"
+--cfg=CONFIG_PAHOLE_VERSION="0"
+--cfg=CONFIG_PINCTRL_RENESAS
+--cfg=CONFIG_PINCTRL_RENESAS="y"
+--cfg=CONFIG_PINCTRL
+--cfg=CONFIG_PINCTRL="y"
+--cfg=CONFIG_ARCH_SUPPORTS_CFI_CLANG
+--cfg=CONFIG_ARCH_SUPPORTS_CFI_CLANG="y"
+--cfg=CONFIG_IPV6_NDISC_NODETYPE
+--cfg=CONFIG_IPV6_NDISC_NODETYPE="y"
+--cfg=CONFIG_PCI_LABEL
+--cfg=CONFIG_PCI_LABEL="y"
+--cfg=CONFIG_CGROUP_SCHED
+--cfg=CONFIG_CGROUP_SCHED="y"
+--cfg=CONFIG_QUEUED_RWLOCKS
+--cfg=CONFIG_QUEUED_RWLOCKS="y"
+--cfg=CONFIG_SYSVIPC
+--cfg=CONFIG_SYSVIPC="y"
+--cfg=CONFIG_RAID6_PQ_BENCHMARK
+--cfg=CONFIG_RAID6_PQ_BENCHMARK="y"
+--cfg=CONFIG_ARCH_HAS_GIGANTIC_PAGE
+--cfg=CONFIG_ARCH_HAS_GIGANTIC_PAGE="y"
+--cfg=CONFIG_USB_CONFIGFS_ECM_SUBSET
+--cfg=CONFIG_USB_CONFIGFS_ECM_SUBSET="y"
+--cfg=CONFIG_HAVE_DEBUG_KMEMLEAK
+--cfg=CONFIG_HAVE_DEBUG_KMEMLEAK="y"
+--cfg=CONFIG_NF_CONNTRACK_FTP
+--cfg=CONFIG_NF_CONNTRACK_FTP="m"
+--cfg=CONFIG_PAGE_SIZE_LESS_THAN_64KB
+--cfg=CONFIG_PAGE_SIZE_LESS_THAN_64KB="y"
+--cfg=CONFIG_USB_MUSB_SUNXI
+--cfg=CONFIG_USB_MUSB_SUNXI="m"
+--cfg=CONFIG_MODULES
+--cfg=CONFIG_MODULES="y"
+--cfg=CONFIG_RPMSG
+--cfg=CONFIG_RPMSG="y"
+--cfg=CONFIG_DEBUG_PER_CPU_MAPS
+--cfg=CONFIG_DEBUG_PER_CPU_MAPS="y"
+--cfg=CONFIG_USB_GADGET
+--cfg=CONFIG_USB_GADGET="y"
+--cfg=CONFIG_CONTEXT_TRACKING
+--cfg=CONFIG_CONTEXT_TRACKING="y"
+--cfg=CONFIG_HAVE_LD_DEAD_CODE_DATA_ELIMINATION
+--cfg=CONFIG_HAVE_LD_DEAD_CODE_DATA_ELIMINATION="y"
+--cfg=CONFIG_MQ_IOSCHED_DEADLINE
+--cfg=CONFIG_MQ_IOSCHED_DEADLINE="y"
+--cfg=CONFIG_GENERIC_IRQ_IPI
+--cfg=CONFIG_GENERIC_IRQ_IPI="y"
+--cfg=CONFIG_DUMMY_CONSOLE_COLUMNS="80"
+--cfg=CONFIG_XXHASH
+--cfg=CONFIG_XXHASH="y"
+--cfg=CONFIG_SOUND
+--cfg=CONFIG_SOUND="y"
+--cfg=CONFIG_JOLIET
+--cfg=CONFIG_JOLIET="y"
+--cfg=CONFIG_ARCH_SUNXI
+--cfg=CONFIG_ARCH_SUNXI="y"
+--cfg=CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+--cfg=CONFIG_CPU_IDLE_MULTIPLE_DRIVERS="y"
+--cfg=CONFIG_PINCTRL_SUN20I_D1
+--cfg=CONFIG_PINCTRL_SUN20I_D1="y"
+--cfg=CONFIG_PROC_CHILDREN
+--cfg=CONFIG_PROC_CHILDREN="y"
+--cfg=CONFIG_UNIX
+--cfg=CONFIG_UNIX="y"
+--cfg=CONFIG_USB_NET_DRIVERS
+--cfg=CONFIG_USB_NET_DRIVERS="y"
+--cfg=CONFIG_CC_CAN_LINK
+--cfg=CONFIG_CC_CAN_LINK="y"
+--cfg=CONFIG_LD_IS_BFD
+--cfg=CONFIG_LD_IS_BFD="y"
+--cfg=CONFIG_NO_HZ_COMMON
+--cfg=CONFIG_NO_HZ_COMMON="y"
+--cfg=CONFIG_DRM_MIPI_DSI
+--cfg=CONFIG_DRM_MIPI_DSI="y"
+--cfg=CONFIG_HAVE_CLK
+--cfg=CONFIG_HAVE_CLK="y"
+--cfg=CONFIG_CRYPTO_HASH2
+--cfg=CONFIG_CRYPTO_HASH2="y"
+--cfg=CONFIG_NET_VENDOR_VERTEXCOM
+--cfg=CONFIG_NET_VENDOR_VERTEXCOM="y"
+--cfg=CONFIG_THERMAL_GOV_STEP_WISE
+--cfg=CONFIG_THERMAL_GOV_STEP_WISE="y"
+--cfg=CONFIG_DEFAULT_HOSTNAME="(none)"
+--cfg=CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS="2"
+--cfg=CONFIG_CC_HAS_NO_PROFILE_FN_ATTR
+--cfg=CONFIG_CC_HAS_NO_PROFILE_FN_ATTR="y"
+--cfg=CONFIG_CPU_FREQ_GOV_POWERSAVE
+--cfg=CONFIG_CPU_FREQ_GOV_POWERSAVE="m"
+--cfg=CONFIG_NFS_FS
+--cfg=CONFIG_NFS_FS="y"
+--cfg=CONFIG_SUNXI_SRAM
+--cfg=CONFIG_SUNXI_SRAM="y"
+--cfg=CONFIG_SUN8I_DE2_CCU
+--cfg=CONFIG_SUN8I_DE2_CCU="m"
+--cfg=CONFIG_MEMBARRIER
+--cfg=CONFIG_MEMBARRIER="y"
+--cfg=CONFIG_XPS
+--cfg=CONFIG_XPS="y"
+--cfg=CONFIG_INET_ESP
+--cfg=CONFIG_INET_ESP="m"
+--cfg=CONFIG_SECURITY_SELINUX_DEVELOP
+--cfg=CONFIG_SECURITY_SELINUX_DEVELOP="y"
+--cfg=CONFIG_SGL_ALLOC
+--cfg=CONFIG_SGL_ALLOC="y"
+--cfg=CONFIG_LZ4_DECOMPRESS
+--cfg=CONFIG_LZ4_DECOMPRESS="y"
+--cfg=CONFIG_FONT_SUPPORT
+--cfg=CONFIG_FONT_SUPPORT="y"
+--cfg=CONFIG_ADVISE_SYSCALLS
+--cfg=CONFIG_ADVISE_SYSCALLS="y"
+--cfg=CONFIG_MD
+--cfg=CONFIG_MD="y"
+--cfg=CONFIG_CRYPTO_ALGAPI
+--cfg=CONFIG_CRYPTO_ALGAPI="y"
+--cfg=CONFIG_GENERIC_IRQ_SHOW_LEVEL
+--cfg=CONFIG_GENERIC_IRQ_SHOW_LEVEL="y"
+--cfg=CONFIG_HOTPLUG_CORE_SYNC
+--cfg=CONFIG_HOTPLUG_CORE_SYNC="y"
+--cfg=CONFIG_NET_VENDOR_WIZNET
+--cfg=CONFIG_NET_VENDOR_WIZNET="y"
+--cfg=CONFIG_BRIDGE
+--cfg=CONFIG_BRIDGE="m"
+--cfg=CONFIG_SCHED_MM_CID
+--cfg=CONFIG_SCHED_MM_CID="y"
+--cfg=CONFIG_RD_BZIP2
+--cfg=CONFIG_RD_BZIP2="y"
+--cfg=CONFIG_SKB_EXTENSIONS
+--cfg=CONFIG_SKB_EXTENSIONS="y"
+--cfg=CONFIG_PM_OPP
+--cfg=CONFIG_PM_OPP="y"
+--cfg=CONFIG_GPIO_CDEV
+--cfg=CONFIG_GPIO_CDEV="y"
+--cfg=CONFIG_CC_VERSION_TEXT="riscv64-unknown-elf-gcc () 13.2.0"
+--cfg=CONFIG_KEYBOARD_ATKBD
+--cfg=CONFIG_KEYBOARD_ATKBD="y"
+--cfg=CONFIG_LIBNVDIMM
+--cfg=CONFIG_LIBNVDIMM="y"
+--cfg=CONFIG_NET_IP_TUNNEL
+--cfg=CONFIG_NET_IP_TUNNEL="y"
+--cfg=CONFIG_MTD_CFI_I1
+--cfg=CONFIG_MTD_CFI_I1="y"
+--cfg=CONFIG_NF_NAT
+--cfg=CONFIG_NF_NAT="m"
+--cfg=CONFIG_BLOCK_LEGACY_AUTOLOAD
+--cfg=CONFIG_BLOCK_LEGACY_AUTOLOAD="y"
+--cfg=CONFIG_NET_VENDOR_OKI
+--cfg=CONFIG_NET_VENDOR_OKI="y"
+--cfg=CONFIG_CPU_IDLE
+--cfg=CONFIG_CPU_IDLE="y"
+--cfg=CONFIG_WLAN_VENDOR_INTERSIL
+--cfg=CONFIG_WLAN_VENDOR_INTERSIL="y"
+--cfg=CONFIG_NFS_COMMON
+--cfg=CONFIG_NFS_COMMON="y"
+--cfg=CONFIG_REGULATOR
+--cfg=CONFIG_REGULATOR="y"
+--cfg=CONFIG_FAIR_GROUP_SCHED
+--cfg=CONFIG_FAIR_GROUP_SCHED="y"
+--cfg=CONFIG_CRYPTO_HASH
+--cfg=CONFIG_CRYPTO_HASH="y"
+--cfg=CONFIG_DRM_SUN8I_MIXER
+--cfg=CONFIG_DRM_SUN8I_MIXER="m"
+--cfg=CONFIG_EFI_PARTITION
+--cfg=CONFIG_EFI_PARTITION="y"
+--cfg=CONFIG_GOLDFISH
+--cfg=CONFIG_GOLDFISH="y"
+--cfg=CONFIG_LOG_BUF_SHIFT="17"
+--cfg=CONFIG_WLAN_VENDOR_ATH
+--cfg=CONFIG_WLAN_VENDOR_ATH="y"
+--cfg=CONFIG_EXTRA_FIRMWARE=""
+--cfg=CONFIG_RUNTIME_KERNEL_TESTING_MENU
+--cfg=CONFIG_RUNTIME_KERNEL_TESTING_MENU="y"
+--cfg=CONFIG_NET_VENDOR_8390
+--cfg=CONFIG_NET_VENDOR_8390="y"
+--cfg=CONFIG_ACPI_VIDEO
+--cfg=CONFIG_ACPI_VIDEO="m"
+--cfg=CONFIG_HAVE_KCSAN_COMPILER
+--cfg=CONFIG_HAVE_KCSAN_COMPILER="y"
+--cfg=CONFIG_VFAT_FS
+--cfg=CONFIG_VFAT_FS="y"
+--cfg=CONFIG_ARCH_SUPPORTS_ACPI
+--cfg=CONFIG_ARCH_SUPPORTS_ACPI="y"
+--cfg=CONFIG_PID_NS
+--cfg=CONFIG_PID_NS="y"
+--cfg=CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE
+--cfg=CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE="y"
+--cfg=CONFIG_CRC32_SLICEBY8
+--cfg=CONFIG_CRC32_SLICEBY8="y"
+--cfg=CONFIG_USB_LIBCOMPOSITE
+--cfg=CONFIG_USB_LIBCOMPOSITE="m"
+--cfg=CONFIG_EFI_PARAMS_FROM_FDT
+--cfg=CONFIG_EFI_PARAMS_FROM_FDT="y"
+--cfg=CONFIG_BLK_DEV_SR
+--cfg=CONFIG_BLK_DEV_SR="y"
+--cfg=CONFIG_CPU_RMAP
+--cfg=CONFIG_CPU_RMAP="y"
+--cfg=CONFIG_BLK_DEV_LOOP
+--cfg=CONFIG_BLK_DEV_LOOP="y"
+--cfg=CONFIG_DEFAULT_HUNG_TASK_TIMEOUT="120"
+--cfg=CONFIG_VIRTIO_PCI_LEGACY
+--cfg=CONFIG_VIRTIO_PCI_LEGACY="y"
+--cfg=CONFIG_SPI_MEM
+--cfg=CONFIG_SPI_MEM="y"
+--cfg=CONFIG_E1000E
+--cfg=CONFIG_E1000E="y"
+--cfg=CONFIG_INPUT_VIVALDIFMAP
+--cfg=CONFIG_INPUT_VIVALDIFMAP="y"
+--cfg=CONFIG_MULTIUSER
+--cfg=CONFIG_MULTIUSER="y"
+--cfg=CONFIG_DMA_OF
+--cfg=CONFIG_DMA_OF="y"
+--cfg=CONFIG_SUSPEND
+--cfg=CONFIG_SUSPEND="y"
+--cfg=CONFIG_GENERIC_VDSO_TIME_NS
+--cfg=CONFIG_GENERIC_VDSO_TIME_NS="y"
+--cfg=CONFIG_CROSS_MEMORY_ATTACH
+--cfg=CONFIG_CROSS_MEMORY_ATTACH="y"
+--cfg=CONFIG_CRYPTO_CBC
+--cfg=CONFIG_CRYPTO_CBC="m"
+--cfg=CONFIG_RENESAS_OSTM
+--cfg=CONFIG_RENESAS_OSTM="y"
+--cfg=CONFIG_PINCTRL_SUNXI
+--cfg=CONFIG_PINCTRL_SUNXI="y"
+--cfg=CONFIG_SUN50I_IOMMU
+--cfg=CONFIG_SUN50I_IOMMU="y"
+--cfg=CONFIG_SERIAL_8250_RUNTIME_UARTS="4"
+--cfg=CONFIG_CLANG_VERSION="0"
+--cfg=CONFIG_FS_MBCACHE
+--cfg=CONFIG_FS_MBCACHE="y"
+--cfg=CONFIG_RTC_CLASS
+--cfg=CONFIG_RTC_CLASS="y"
+--cfg=CONFIG_CRC7
+--cfg=CONFIG_CRC7="y"
+--cfg=CONFIG_CRYPTO_RNG_DEFAULT
+--cfg=CONFIG_CRYPTO_RNG_DEFAULT="m"
+--cfg=CONFIG_TMPFS_XATTR
+--cfg=CONFIG_TMPFS_XATTR="y"
+--cfg=CONFIG_EXT4_USE_FOR_EXT2
+--cfg=CONFIG_EXT4_USE_FOR_EXT2="y"
+--cfg=CONFIG_USB_RENESAS_USBHS
+--cfg=CONFIG_USB_RENESAS_USBHS="m"
+--cfg=CONFIG_ARM_AMBA
+--cfg=CONFIG_ARM_AMBA="y"
+--cfg=CONFIG_CPU_PM
+--cfg=CONFIG_CPU_PM="y"
+--cfg=CONFIG_TIMER_OF
+--cfg=CONFIG_TIMER_OF="y"
+--cfg=CONFIG_ARCH_HAS_DMA_PREP_COHERENT
+--cfg=CONFIG_ARCH_HAS_DMA_PREP_COHERENT="y"
+--cfg=CONFIG_HAVE_FUNCTION_TRACER
+--cfg=CONFIG_HAVE_FUNCTION_TRACER="y"
+--cfg=CONFIG_GENERIC_PHY
+--cfg=CONFIG_GENERIC_PHY="y"
+--cfg=CONFIG_CPU_ISOLATION
+--cfg=CONFIG_CPU_ISOLATION="y"
+--cfg=CONFIG_NF_NAT_TFTP
+--cfg=CONFIG_NF_NAT_TFTP="m"
+--cfg=CONFIG_MTD_SPI_NOR
+--cfg=CONFIG_MTD_SPI_NOR="y"
+--cfg=CONFIG_DRM_DISPLAY_DP_HELPER
+--cfg=CONFIG_DRM_DISPLAY_DP_HELPER="y"
+--cfg=CONFIG_ARCH_SELECT_MEMORY_MODEL
+--cfg=CONFIG_ARCH_SELECT_MEMORY_MODEL="y"
+--cfg=CONFIG_NETFILTER_XT_TARGET_REDIRECT
+--cfg=CONFIG_NETFILTER_XT_TARGET_REDIRECT="m"
+--cfg=CONFIG_CRYPTO_MANAGER2
+--cfg=CONFIG_CRYPTO_MANAGER2="y"
+--cfg=CONFIG_SERIAL_8250_DEPRECATED_OPTIONS
+--cfg=CONFIG_SERIAL_8250_DEPRECATED_OPTIONS="y"
+--cfg=CONFIG_USB_GADGET_VBUS_DRAW="2"
+--cfg=CONFIG_ARCH_HAS_PTE_SPECIAL
+--cfg=CONFIG_ARCH_HAS_PTE_SPECIAL="y"
+--cfg=CONFIG_NET_VENDOR_MYRI
+--cfg=CONFIG_NET_VENDOR_MYRI="y"
+--cfg=CONFIG_NF_NAT_MASQUERADE
+--cfg=CONFIG_NF_NAT_MASQUERADE="y"
+--cfg=CONFIG_PM_GENERIC_DOMAINS_OF
+--cfg=CONFIG_PM_GENERIC_DOMAINS_OF="y"
+--cfg=CONFIG_DEBUG_VM_PGTABLE
+--cfg=CONFIG_DEBUG_VM_PGTABLE="y"
+--cfg=CONFIG_CLZ_TAB
+--cfg=CONFIG_CLZ_TAB="y"
+--cfg=CONFIG_GENERIC_PCI_IOMAP
+--cfg=CONFIG_GENERIC_PCI_IOMAP="y"
+--cfg=CONFIG_SLUB
+--cfg=CONFIG_SLUB="y"
+--cfg=CONFIG_CONFIGFS_FS
+--cfg=CONFIG_CONFIGFS_FS="m"
+--cfg=CONFIG_XZ_DEC_BCJ
+--cfg=CONFIG_XZ_DEC_BCJ="y"
+--cfg=CONFIG_PM_SLEEP
+--cfg=CONFIG_PM_SLEEP="y"
+--cfg=CONFIG_I2C
+--cfg=CONFIG_I2C="m"
+--cfg=CONFIG_DEBUG_VM
+--cfg=CONFIG_DEBUG_VM="y"
+--cfg=CONFIG_RISCV_ISA_SVPBMT
+--cfg=CONFIG_RISCV_ISA_SVPBMT="y"
+--cfg=CONFIG_MMC_SDHI_INTERNAL_DMAC
+--cfg=CONFIG_MMC_SDHI_INTERNAL_DMAC="y"
+--cfg=CONFIG_BINFMT_SCRIPT
+--cfg=CONFIG_BINFMT_SCRIPT="y"
+--cfg=CONFIG_EFI_STUB
+--cfg=CONFIG_EFI_STUB="y"
+--cfg=CONFIG_MOUSE_PS2_CYPRESS
+--cfg=CONFIG_MOUSE_PS2_CYPRESS="y"
+--cfg=CONFIG_FRAME_POINTER
+--cfg=CONFIG_FRAME_POINTER="y"
+--cfg=CONFIG_MOUSE_PS2_LOGIPS2PP
+--cfg=CONFIG_MOUSE_PS2_LOGIPS2PP="y"
+--cfg=CONFIG_TICK_CPU_ACCOUNTING
+--cfg=CONFIG_TICK_CPU_ACCOUNTING="y"
+--cfg=CONFIG_VM_EVENT_COUNTERS
+--cfg=CONFIG_VM_EVENT_COUNTERS="y"
+--cfg=CONFIG_SCHED_STACK_END_CHECK
+--cfg=CONFIG_SCHED_STACK_END_CHECK="y"
+--cfg=CONFIG_RESET_SUNXI
+--cfg=CONFIG_RESET_SUNXI="y"
+--cfg=CONFIG_CRYPTO_ECB
+--cfg=CONFIG_CRYPTO_ECB="y"
+--cfg=CONFIG_WLAN_VENDOR_BROADCOM
+--cfg=CONFIG_WLAN_VENDOR_BROADCOM="y"
+--cfg=CONFIG_DEBUG_FS
+--cfg=CONFIG_DEBUG_FS="y"
+--cfg=CONFIG_NET_VENDOR_AMD
+--cfg=CONFIG_NET_VENDOR_AMD="y"
+--cfg=CONFIG_DRM_TTM
+--cfg=CONFIG_DRM_TTM="m"
+--cfg=CONFIG_BASE_FULL
+--cfg=CONFIG_BASE_FULL="y"
+--cfg=CONFIG_FB_CFB_IMAGEBLIT
+--cfg=CONFIG_FB_CFB_IMAGEBLIT="y"
+--cfg=CONFIG_ZLIB_DEFLATE
+--cfg=CONFIG_ZLIB_DEFLATE="m"
+--cfg=CONFIG_SUNRPC
+--cfg=CONFIG_SUNRPC="y"
+--cfg=CONFIG_RPMSG_NS
+--cfg=CONFIG_RPMSG_NS="y"
+--cfg=CONFIG_RENESAS_RZG2LWDT
+--cfg=CONFIG_RENESAS_RZG2LWDT="y"
+--cfg=CONFIG_CACHESTAT_SYSCALL
+--cfg=CONFIG_CACHESTAT_SYSCALL="y"
+--cfg=CONFIG_RSEQ
+--cfg=CONFIG_RSEQ="y"
+--cfg=CONFIG_FW_LOADER
+--cfg=CONFIG_FW_LOADER="y"
+--cfg=CONFIG_KALLSYMS
+--cfg=CONFIG_KALLSYMS="y"
+--cfg=CONFIG_COMMON_CLK
+--cfg=CONFIG_COMMON_CLK="y"
+--cfg=CONFIG_STACKPROTECTOR_STRONG
+--cfg=CONFIG_STACKPROTECTOR_STRONG="y"
+--cfg=CONFIG_PCI
+--cfg=CONFIG_PCI="y"
+--cfg=CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+--cfg=CONFIG_NET_VENDOR_FUNGIBLE
+--cfg=CONFIG_NET_VENDOR_FUNGIBLE="y"
+--cfg=CONFIG_NET_VENDOR_ASIX
+--cfg=CONFIG_NET_VENDOR_ASIX="y"
+--cfg=CONFIG_DECOMPRESS_XZ
+--cfg=CONFIG_DECOMPRESS_XZ="y"
+--cfg=CONFIG_PCI_QUIRKS
+--cfg=CONFIG_PCI_QUIRKS="y"
+--cfg=CONFIG_MII
+--cfg=CONFIG_MII="y"
+--cfg=CONFIG_MD_BITMAP_FILE
+--cfg=CONFIG_MD_BITMAP_FILE="y"
+--cfg=CONFIG_SIGNALFD
+--cfg=CONFIG_SIGNALFD="y"
+--cfg=CONFIG_NET_CORE
+--cfg=CONFIG_NET_CORE="y"
+--cfg=CONFIG_MOUSE_PS2_ALPS
+--cfg=CONFIG_MOUSE_PS2_ALPS="y"
+--cfg=CONFIG_EXT4_FS
+--cfg=CONFIG_EXT4_FS="y"
+--cfg=CONFIG_MEMORY_BALLOON
+--cfg=CONFIG_MEMORY_BALLOON="y"
+--cfg=CONFIG_UNINLINE_SPIN_UNLOCK
+--cfg=CONFIG_UNINLINE_SPIN_UNLOCK="y"
+--cfg=CONFIG_SND_JACK_INPUT_DEV
+--cfg=CONFIG_SND_JACK_INPUT_DEV="y"
+--cfg=CONFIG_CRYPTO_SHA1
+--cfg=CONFIG_CRYPTO_SHA1="y"
+--cfg=CONFIG_SATA_PMP
+--cfg=CONFIG_SATA_PMP="y"
+--cfg=CONFIG_XZ_DEC
+--cfg=CONFIG_XZ_DEC="y"
+--cfg=CONFIG_NET_VENDOR_TI
+--cfg=CONFIG_NET_VENDOR_TI="y"
+--cfg=CONFIG_LOCKD_V4
+--cfg=CONFIG_LOCKD_V4="y"
+--cfg=CONFIG_DUMMY
+--cfg=CONFIG_DUMMY="m"
+--cfg=CONFIG_NET_VENDOR_ALACRITECH
+--cfg=CONFIG_NET_VENDOR_ALACRITECH="y"
+--cfg=CONFIG_CRYPTO_DEV_ALLWINNER
+--cfg=CONFIG_CRYPTO_DEV_ALLWINNER="y"
+--cfg=CONFIG_WATCHDOG
+--cfg=CONFIG_WATCHDOG="y"
+--cfg=CONFIG_TUNE_GENERIC
+--cfg=CONFIG_TUNE_GENERIC="y"
+--cfg=CONFIG_HAS_IOMEM
+--cfg=CONFIG_HAS_IOMEM="y"
+--cfg=CONFIG_ACPI_GENERIC_GSI
+--cfg=CONFIG_ACPI_GENERIC_GSI="y"
+--cfg=CONFIG_NF_LOG_IPV6
+--cfg=CONFIG_NF_LOG_IPV6="m"
+--cfg=CONFIG_PINCTRL_STARFIVE_JH7100
+--cfg=CONFIG_PINCTRL_STARFIVE_JH7100="y"
+--cfg=CONFIG_CRYPTO_RSA
+--cfg=CONFIG_CRYPTO_RSA="y"
+--cfg=CONFIG_DMA_DIRECT_REMAP
+--cfg=CONFIG_DMA_DIRECT_REMAP="y"
+--cfg=CONFIG_CRYPTO_SIG2
+--cfg=CONFIG_CRYPTO_SIG2="y"
+--cfg=CONFIG_PWRSEQ_EMMC
+--cfg=CONFIG_PWRSEQ_EMMC="y"
+--cfg=CONFIG_HAVE_MOVE_PUD
+--cfg=CONFIG_HAVE_MOVE_PUD="y"
+--cfg=CONFIG_VIDEOMODE_HELPERS
+--cfg=CONFIG_VIDEOMODE_HELPERS="y"
+--cfg=CONFIG_CRYPTO_ACOMP2
+--cfg=CONFIG_CRYPTO_ACOMP2="y"
+--cfg=CONFIG_ARCH_HAS_SETUP_DMA_OPS
+--cfg=CONFIG_ARCH_HAS_SETUP_DMA_OPS="y"
+--cfg=CONFIG_HAVE_ARCH_KASAN_VMALLOC
+--cfg=CONFIG_HAVE_ARCH_KASAN_VMALLOC="y"
+--cfg=CONFIG_ARCH_SIFIVE
+--cfg=CONFIG_ARCH_SIFIVE="y"
+--cfg=CONFIG_MTD_MAP_BANK_WIDTH_1
+--cfg=CONFIG_MTD_MAP_BANK_WIDTH_1="y"
+--cfg=CONFIG_SCHED_HRTICK
+--cfg=CONFIG_SCHED_HRTICK="y"
+--cfg=CONFIG_RESET_STARFIVE_JH7100
+--cfg=CONFIG_RESET_STARFIVE_JH7100="y"
+--cfg=CONFIG_EPOLL
+--cfg=CONFIG_EPOLL="y"
+--cfg=CONFIG_GENERIC_IRQ_MULTI_HANDLER
+--cfg=CONFIG_GENERIC_IRQ_MULTI_HANDLER="y"
+--cfg=CONFIG_SND_PCM
+--cfg=CONFIG_SND_PCM="y"
+--cfg=CONFIG_SATA_MOBILE_LPM_POLICY="0"
+--cfg=CONFIG_SUN20I_D1_CCU
+--cfg=CONFIG_SUN20I_D1_CCU="y"
+--cfg=CONFIG_RISCV_ISA_V_DEFAULT_ENABLE
+--cfg=CONFIG_RISCV_ISA_V_DEFAULT_ENABLE="y"
+--cfg=CONFIG_FAILOVER
+--cfg=CONFIG_FAILOVER="y"
+--cfg=CONFIG_RISCV_ISA_ZBB
+--cfg=CONFIG_RISCV_ISA_ZBB="y"
+--cfg=CONFIG_CGROUP_HUGETLB
+--cfg=CONFIG_CGROUP_HUGETLB="y"
+--cfg=CONFIG_USB_U_ETHER
+--cfg=CONFIG_USB_U_ETHER="m"
+--cfg=CONFIG_ERRATA_THEAD_CMO
+--cfg=CONFIG_ERRATA_THEAD_CMO="y"
+--cfg=CONFIG_RISCV_DMA_NONCOHERENT
+--cfg=CONFIG_RISCV_DMA_NONCOHERENT="y"
+--cfg=CONFIG_GENERIC_PTDUMP
+--cfg=CONFIG_GENERIC_PTDUMP="y"
+--cfg=CONFIG_NET
+--cfg=CONFIG_NET="y"
+--cfg=CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE
+--cfg=CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE="y"
+--cfg=CONFIG_USB_OHCI_HCD_PCI
+--cfg=CONFIG_USB_OHCI_HCD_PCI="y"
+--cfg=CONFIG_SND_JACK
+--cfg=CONFIG_SND_JACK="y"
+--cfg=CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN
+--cfg=CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN="y"
+--cfg=CONFIG_SECURITY_PATH
+--cfg=CONFIG_SECURITY_PATH="y"
+--cfg=CONFIG_VIRTIO_DMA_SHARED_BUFFER
+--cfg=CONFIG_VIRTIO_DMA_SHARED_BUFFER="m"
+--cfg=CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC
+--cfg=CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC="y"
+--cfg=CONFIG_TOOLCHAIN_HAS_V
+--cfg=CONFIG_TOOLCHAIN_HAS_V="y"
+--cfg=CONFIG_NETFILTER_XT_MATCH_CONNTRACK
+--cfg=CONFIG_NETFILTER_XT_MATCH_CONNTRACK="m"
+--cfg=CONFIG_RISCV_ALTERNATIVE
+--cfg=CONFIG_RISCV_ALTERNATIVE="y"
+--cfg=CONFIG_PWRSEQ_SIMPLE
+--cfg=CONFIG_PWRSEQ_SIMPLE="y"
+--cfg=CONFIG_PINMUX
+--cfg=CONFIG_PINMUX="y"
+--cfg=CONFIG_MTD_GEN_PROBE
+--cfg=CONFIG_MTD_GEN_PROBE="y"
+--cfg=CONFIG_IRQ_DOMAIN_HIERARCHY
+--cfg=CONFIG_IRQ_DOMAIN_HIERARCHY="y"
+--cfg=CONFIG_ATA_FORCE
+--cfg=CONFIG_ATA_FORCE="y"
+--cfg=CONFIG_NETFILTER_BPF_LINK
+--cfg=CONFIG_NETFILTER_BPF_LINK="y"
+--cfg=CONFIG_MPILIB
+--cfg=CONFIG_MPILIB="y"
+--cfg=CONFIG_PACKET
+--cfg=CONFIG_PACKET="y"
+--cfg=CONFIG_XFRM_ALGO
+--cfg=CONFIG_XFRM_ALGO="m"
+--cfg=CONFIG_SND_SIMPLE_CARD_UTILS
+--cfg=CONFIG_SND_SIMPLE_CARD_UTILS="m"
+--cfg=CONFIG_BLK_ICQ
+--cfg=CONFIG_BLK_ICQ="y"
+--cfg=CONFIG_HAVE_CLK_PREPARE
+--cfg=CONFIG_HAVE_CLK_PREPARE="y"
+--cfg=CONFIG_CRYPTO_AKCIPHER2
+--cfg=CONFIG_CRYPTO_AKCIPHER2="y"
+--cfg=CONFIG_FB_IOMEM_FOPS
+--cfg=CONFIG_FB_IOMEM_FOPS="y"
+--cfg=CONFIG_SND_CTL_FAST_LOOKUP
+--cfg=CONFIG_SND_CTL_FAST_LOOKUP="y"
+--cfg=CONFIG_BTRFS_FS_POSIX_ACL
+--cfg=CONFIG_BTRFS_FS_POSIX_ACL="y"
+--cfg=CONFIG_CLK_STARFIVE_JH7100
+--cfg=CONFIG_CLK_STARFIVE_JH7100="y"
+--cfg=CONFIG_CRYPTO_JITTERENTROPY_OSR="1"
+--cfg=CONFIG_NET_VENDOR_ALLWINNER
+--cfg=CONFIG_NET_VENDOR_ALLWINNER="y"
+--cfg=CONFIG_DUMMY_CONSOLE_ROWS="25"
+--cfg=CONFIG_USB_XHCI_PLATFORM
+--cfg=CONFIG_USB_XHCI_PLATFORM="y"
+--cfg=CONFIG_NF_CONNTRACK_TFTP
+--cfg=CONFIG_NF_CONNTRACK_TFTP="m"
+--cfg=CONFIG_NFS_V3
+--cfg=CONFIG_NFS_V3="y"
+--cfg=CONFIG_RISCV_ALTERNATIVE_EARLY
+--cfg=CONFIG_RISCV_ALTERNATIVE_EARLY="y"
+--cfg=CONFIG_HAVE_KVM_IRQFD
+--cfg=CONFIG_HAVE_KVM_IRQFD="y"
+--cfg=CONFIG_INET
+--cfg=CONFIG_INET="y"
+--cfg=CONFIG_XZ_DEC_POWERPC
+--cfg=CONFIG_XZ_DEC_POWERPC="y"
+--cfg=CONFIG_IP_PNP_BOOTP
+--cfg=CONFIG_IP_PNP_BOOTP="y"
+--cfg=CONFIG_USB_MUSB_HDRC
+--cfg=CONFIG_USB_MUSB_HDRC="m"
+--cfg=CONFIG_VIRTIO_NET
+--cfg=CONFIG_VIRTIO_NET="y"
+--cfg=CONFIG_NETFILTER_XT_MATCH_ADDRTYPE
+--cfg=CONFIG_NETFILTER_XT_MATCH_ADDRTYPE="m"
+--cfg=CONFIG_NET_VENDOR_HUAWEI
+--cfg=CONFIG_NET_VENDOR_HUAWEI="y"
+--cfg=CONFIG_PREVENT_FIRMWARE_BUILD
+--cfg=CONFIG_PREVENT_FIRMWARE_BUILD="y"
+--cfg=CONFIG_SERIAL_8250_PNP
+--cfg=CONFIG_SERIAL_8250_PNP="y"
+--cfg=CONFIG_DRM_DW_HDMI
+--cfg=CONFIG_DRM_DW_HDMI="m"
+--cfg=CONFIG_FREEZER
+--cfg=CONFIG_FREEZER="y"
+--cfg=CONFIG_USB_F_SUBSET
+--cfg=CONFIG_USB_F_SUBSET="m"
+--cfg=CONFIG_PCI_DOMAINS
+--cfg=CONFIG_PCI_DOMAINS="y"
+--cfg=CONFIG_NET_VENDOR_CHELSIO
+--cfg=CONFIG_NET_VENDOR_CHELSIO="y"
+--cfg=CONFIG_EFIVAR_FS
+--cfg=CONFIG_EFIVAR_FS="m"
+--cfg=CONFIG_HAVE_ARCH_VMAP_STACK
+--cfg=CONFIG_HAVE_ARCH_VMAP_STACK="y"
+--cfg=CONFIG_NETFILTER_XT_MATCH_IPVS
+--cfg=CONFIG_NETFILTER_XT_MATCH_IPVS="m"
+--cfg=CONFIG_RISCV_MISALIGNED
+--cfg=CONFIG_RISCV_MISALIGNED="y"
+--cfg=CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
+--cfg=CONFIG_USB_F_ACM
+--cfg=CONFIG_USB_F_ACM="m"
+--cfg=CONFIG_RTC_LIB
+--cfg=CONFIG_RTC_LIB="y"
+--cfg=CONFIG_SUN20I_D1_R_CCU
+--cfg=CONFIG_SUN20I_D1_R_CCU="y"
+--cfg=CONFIG_RISCV_ISA_SVNAPOT
+--cfg=CONFIG_RISCV_ISA_SVNAPOT="y"
+--cfg=CONFIG_HAVE_KPROBES
+--cfg=CONFIG_HAVE_KPROBES="y"
+--cfg=CONFIG_CRYPTO_AES
+--cfg=CONFIG_CRYPTO_AES="m"
+--cfg=CONFIG_HAVE_GENERIC_VDSO
+--cfg=CONFIG_HAVE_GENERIC_VDSO="y"
+--cfg=CONFIG_THREAD_SIZE_ORDER="2"
+--cfg=CONFIG_GPIOLIB
+--cfg=CONFIG_GPIOLIB="y"
+--cfg=CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+--cfg=CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT="y"
+--cfg=CONFIG_FUTEX_PI
+--cfg=CONFIG_FUTEX_PI="y"
+--cfg=CONFIG_MMC_DW
+--cfg=CONFIG_MMC_DW="y"
+--cfg=CONFIG_DM_BIO_PRISON
+--cfg=CONFIG_DM_BIO_PRISON="m"
+--cfg=CONFIG_AUTOFS_FS
+--cfg=CONFIG_AUTOFS_FS="y"
+--cfg=CONFIG_ISO9660_FS
+--cfg=CONFIG_ISO9660_FS="y"
+--cfg=CONFIG_NETFILTER_XT_NAT
+--cfg=CONFIG_NETFILTER_XT_NAT="m"
+--cfg=CONFIG_DMA_NONCOHERENT_MMAP
+--cfg=CONFIG_DMA_NONCOHERENT_MMAP="y"
+--cfg=CONFIG_CLKSRC_MMIO
+--cfg=CONFIG_CLKSRC_MMIO="y"
+--cfg=CONFIG_MTD_CFI_NOSWAP
+--cfg=CONFIG_MTD_CFI_NOSWAP="y"
+--cfg=CONFIG_NET_VENDOR_AQUANTIA
+--cfg=CONFIG_NET_VENDOR_AQUANTIA="y"
+--cfg=CONFIG_SCSI_VIRTIO
+--cfg=CONFIG_SCSI_VIRTIO="y"
+--cfg=CONFIG_HVC_DRIVER
+--cfg=CONFIG_HVC_DRIVER="y"
+--cfg=CONFIG_NETFILTER
+--cfg=CONFIG_NETFILTER="y"
+--cfg=CONFIG_HAVE_ARCH_KASAN
+--cfg=CONFIG_HAVE_ARCH_KASAN="y"
+--cfg=CONFIG_NET_VENDOR_SMSC
+--cfg=CONFIG_NET_VENDOR_SMSC="y"
+--cfg=CONFIG_GENERIC_ARCH_TOPOLOGY
+--cfg=CONFIG_GENERIC_ARCH_TOPOLOGY="y"
+--cfg=CONFIG_NFS_DISABLE_UDP_SUPPORT
+--cfg=CONFIG_NFS_DISABLE_UDP_SUPPORT="y"
+--cfg=CONFIG_SERIO_SERPORT
+--cfg=CONFIG_SERIO_SERPORT="y"
+--cfg=CONFIG_CLONE_BACKWARDS
+--cfg=CONFIG_CLONE_BACKWARDS="y"
+--cfg=CONFIG_RD_XZ
+--cfg=CONFIG_RD_XZ="y"
+--cfg=CONFIG_HAVE_PREEMPT_DYNAMIC_KEY
+--cfg=CONFIG_HAVE_PREEMPT_DYNAMIC_KEY="y"
+--cfg=CONFIG_AUXILIARY_BUS
+--cfg=CONFIG_AUXILIARY_BUS="y"
+--cfg=CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
+--cfg=CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS="y"
+--cfg=CONFIG_ATA_VERBOSE_ERROR
+--cfg=CONFIG_ATA_VERBOSE_ERROR="y"
+--cfg=CONFIG_SND_DRIVERS
+--cfg=CONFIG_SND_DRIVERS="y"
+--cfg=CONFIG_NET_FLOW_LIMIT
+--cfg=CONFIG_NET_FLOW_LIMIT="y"
+--cfg=CONFIG_LOCKDEP_SUPPORT
+--cfg=CONFIG_LOCKDEP_SUPPORT="y"
+--cfg=CONFIG_RISCV_PMU
+--cfg=CONFIG_RISCV_PMU="y"
+--cfg=CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+--cfg=CONFIG_ARCH_WANT_HUGE_PMD_SHARE="y"
+--cfg=CONFIG_DEBUG_ATOMIC_SLEEP
+--cfg=CONFIG_DEBUG_ATOMIC_SLEEP="y"
+--cfg=CONFIG_POSIX_MQUEUE
+--cfg=CONFIG_POSIX_MQUEUE="y"
+--cfg=CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS
+--cfg=CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS="y"
+--cfg=CONFIG_NOUVEAU_DEBUG="5"
+--cfg=CONFIG_NETFILTER_INGRESS
+--cfg=CONFIG_NETFILTER_INGRESS="y"
+--cfg=CONFIG_CRYPTO_LIB_GF128MUL
+--cfg=CONFIG_CRYPTO_LIB_GF128MUL="m"
+--cfg=CONFIG_PCIE_FU740
+--cfg=CONFIG_PCIE_FU740="y"
+--cfg=CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE="256"
+--cfg=CONFIG_CPU_FREQ_STAT
+--cfg=CONFIG_CPU_FREQ_STAT="y"
+--cfg=CONFIG_NET_XGRESS
+--cfg=CONFIG_NET_XGRESS="y"
+--cfg=CONFIG_GENERIC_STRNCPY_FROM_USER
+--cfg=CONFIG_GENERIC_STRNCPY_FROM_USER="y"
+--cfg=CONFIG_MTD_BLKDEVS
+--cfg=CONFIG_MTD_BLKDEVS="y"
+--cfg=CONFIG_HAVE_RSEQ
+--cfg=CONFIG_HAVE_RSEQ="y"
+--cfg=CONFIG_OF_KOBJ
+--cfg=CONFIG_OF_KOBJ="y"
+--cfg=CONFIG_CONTEXT_TRACKING_IDLE
+--cfg=CONFIG_CONTEXT_TRACKING_IDLE="y"
+--cfg=CONFIG_DEBUG_SPINLOCK
+--cfg=CONFIG_DEBUG_SPINLOCK="y"
+--cfg=CONFIG_NET_VENDOR_DEC
+--cfg=CONFIG_NET_VENDOR_DEC="y"
+--cfg=CONFIG_AS_HAS_ULEB128
+--cfg=CONFIG_AS_HAS_ULEB128="y"
+--cfg=CONFIG_ACPI_BUTTON
+--cfg=CONFIG_ACPI_BUTTON="y"
+--cfg=CONFIG_GENERIC_GETTIMEOFDAY
+--cfg=CONFIG_GENERIC_GETTIMEOFDAY="y"
+--cfg=CONFIG_PCS_XPCS
+--cfg=CONFIG_PCS_XPCS="m"
+--cfg=CONFIG_ARCH_USE_MEMTEST
+--cfg=CONFIG_ARCH_USE_MEMTEST="y"
+--cfg=CONFIG_DRM_SUN6I_DSI
+--cfg=CONFIG_DRM_SUN6I_DSI="m"
+--cfg=CONFIG_NET_VENDOR_PENSANDO
+--cfg=CONFIG_NET_VENDOR_PENSANDO="y"
+--cfg=CONFIG_IP6_NF_FILTER
+--cfg=CONFIG_IP6_NF_FILTER="m"
+--cfg=CONFIG_INPUT_MOUSEDEV_SCREEN_X="1024"
+--cfg=CONFIG_EFI_ESRT
+--cfg=CONFIG_EFI_ESRT="y"
+--cfg=CONFIG_NEED_DMA_MAP_STATE
+--cfg=CONFIG_NEED_DMA_MAP_STATE="y"
+--cfg=CONFIG_IIO
+--cfg=CONFIG_IIO="y"
+--cfg=CONFIG_SND_HDA_PREALLOC_SIZE="64"
+--cfg=CONFIG_SERIO_LIBPS2
+--cfg=CONFIG_SERIO_LIBPS2="y"
+--cfg=CONFIG_IP_VS_PROTO_TCP
+--cfg=CONFIG_IP_VS_PROTO_TCP="y"
+--cfg=CONFIG_SERIAL_SIFIVE_CONSOLE
+--cfg=CONFIG_SERIAL_SIFIVE_CONSOLE="y"
+--cfg=CONFIG_SOC_VIRT
+--cfg=CONFIG_SOC_VIRT="y"
+--cfg=CONFIG_PAGE_OFFSET="0xff60000000000000"
+--cfg=CONFIG_FONT_8x8
+--cfg=CONFIG_FONT_8x8="y"
+--cfg=CONFIG_NET_VENDOR_ATHEROS
+--cfg=CONFIG_NET_VENDOR_ATHEROS="y"
+--cfg=CONFIG_CLK_STARFIVE_JH7110_ISP
+--cfg=CONFIG_CLK_STARFIVE_JH7110_ISP="m"
+--cfg=CONFIG_XOR_BLOCKS
+--cfg=CONFIG_XOR_BLOCKS="m"
+--cfg=CONFIG_TIME_NS
+--cfg=CONFIG_TIME_NS="y"
+--cfg=CONFIG_NET_VENDOR_SUN
+--cfg=CONFIG_NET_VENDOR_SUN="y"
+--cfg=CONFIG_PANIC_TIMEOUT="0"
+--cfg=CONFIG_PM_GENERIC_DOMAINS_SLEEP
+--cfg=CONFIG_PM_GENERIC_DOMAINS_SLEEP="y"
+--cfg=CONFIG_HAVE_ARCH_SECCOMP
+--cfg=CONFIG_HAVE_ARCH_SECCOMP="y"
+--cfg=CONFIG_STACKDEPOT
+--cfg=CONFIG_STACKDEPOT="y"
+--cfg=CONFIG_NET_VENDOR_XILINX
+--cfg=CONFIG_NET_VENDOR_XILINX="y"
+--cfg=CONFIG_DECOMPRESS_LZ4
+--cfg=CONFIG_DECOMPRESS_LZ4="y"
+--cfg=CONFIG_PREEMPT_NONE
+--cfg=CONFIG_PREEMPT_NONE="y"
+--cfg=CONFIG_SPARSEMEM_MANUAL
+--cfg=CONFIG_SPARSEMEM_MANUAL="y"
+--cfg=CONFIG_ERRATA_SIFIVE_CIP_453
+--cfg=CONFIG_ERRATA_SIFIVE_CIP_453="y"
+--cfg=CONFIG_BPF_SYSCALL
+--cfg=CONFIG_BPF_SYSCALL="y"
+--cfg=CONFIG_SMP
+--cfg=CONFIG_SMP="y"
+--cfg=CONFIG_NET_VENDOR_CADENCE
+--cfg=CONFIG_NET_VENDOR_CADENCE="y"
+--cfg=CONFIG_NET_VENDOR_MICROSOFT
+--cfg=CONFIG_NET_VENDOR_MICROSOFT="y"
+--cfg=CONFIG_TTY
+--cfg=CONFIG_TTY="y"
+--cfg=CONFIG_IP_VS
+--cfg=CONFIG_IP_VS="m"
+--cfg=CONFIG_NET_VENDOR_I825XX
+--cfg=CONFIG_NET_VENDOR_I825XX="y"
+--cfg=CONFIG_PNP
+--cfg=CONFIG_PNP="y"
+--cfg=CONFIG_RCU_EXP_CPU_STALL_TIMEOUT="0"
+--cfg=CONFIG_GENERIC_ALLOCATOR
+--cfg=CONFIG_GENERIC_ALLOCATOR="y"
+--cfg=CONFIG_MMC_SDHCI_IO_ACCESSORS
+--cfg=CONFIG_MMC_SDHCI_IO_ACCESSORS="y"
+--cfg=CONFIG_FB_SYSMEM_HELPERS_DEFERRED
+--cfg=CONFIG_FB_SYSMEM_HELPERS_DEFERRED="y"
+--cfg=CONFIG_LIBCRC32C
+--cfg=CONFIG_LIBCRC32C="m"
+--cfg=CONFIG_GENERIC_BUG
+--cfg=CONFIG_GENERIC_BUG="y"
+--cfg=CONFIG_CRYPTO_SHA256
+--cfg=CONFIG_CRYPTO_SHA256="m"
+--cfg=CONFIG_HAVE_FTRACE_MCOUNT_RECORD
+--cfg=CONFIG_HAVE_FTRACE_MCOUNT_RECORD="y"
+--cfg=CONFIG_BRIDGE_VLAN_FILTERING
+--cfg=CONFIG_BRIDGE_VLAN_FILTERING="y"
+--cfg=CONFIG_POSIX_TIMERS
+--cfg=CONFIG_POSIX_TIMERS="y"
+--cfg=CONFIG_INET_TCP_DIAG
+--cfg=CONFIG_INET_TCP_DIAG="y"
+--cfg=CONFIG_HOTPLUG_CORE_SYNC_DEAD
+--cfg=CONFIG_HOTPLUG_CORE_SYNC_DEAD="y"
+--cfg=CONFIG_HW_CONSOLE
+--cfg=CONFIG_HW_CONSOLE="y"
+--cfg=CONFIG_MDIO_BITBANG
+--cfg=CONFIG_MDIO_BITBANG="y"
+--cfg=CONFIG_HAVE_KVM_IRQ_ROUTING
+--cfg=CONFIG_HAVE_KVM_IRQ_ROUTING="y"
+--cfg=CONFIG_DWMAC_SUNXI
+--cfg=CONFIG_DWMAC_SUNXI="m"
+--cfg=CONFIG_DEVMEM
+--cfg=CONFIG_DEVMEM="y"
+--cfg=CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY
+--cfg=CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY="y"
+--cfg=CONFIG_MOUSE_PS2_FOCALTECH
+--cfg=CONFIG_MOUSE_PS2_FOCALTECH="y"
+--cfg=CONFIG_CRYPTO_LIB_SHA1
+--cfg=CONFIG_CRYPTO_LIB_SHA1="y"
+--cfg=CONFIG_SND_SPI
+--cfg=CONFIG_SND_SPI="y"
+--cfg=CONFIG_SOC_STARFIVE
+--cfg=CONFIG_SOC_STARFIVE="y"
+--cfg=CONFIG_LIST_HARDENED
+--cfg=CONFIG_LIST_HARDENED="y"
+--cfg=CONFIG_DM_THIN_PROVISIONING
+--cfg=CONFIG_DM_THIN_PROVISIONING="m"
+--cfg=CONFIG_KEYBOARD_SUN4I_LRADC
+--cfg=CONFIG_KEYBOARD_SUN4I_LRADC="m"
+--cfg=CONFIG_PREEMPT_NONE_BUILD
+--cfg=CONFIG_PREEMPT_NONE_BUILD="y"
+--cfg=CONFIG_RTC_NVMEM
+--cfg=CONFIG_RTC_NVMEM="y"
+--cfg=CONFIG_ZSTD_COMMON
+--cfg=CONFIG_ZSTD_COMMON="y"
+--cfg=CONFIG_CC_HAS_KASAN_GENERIC
+--cfg=CONFIG_CC_HAS_KASAN_GENERIC="y"
+--cfg=CONFIG_DRM_SUN8I_DW_HDMI
+--cfg=CONFIG_DRM_SUN8I_DW_HDMI="m"
+--cfg=CONFIG_POWER_RESET_SYSCON_POWEROFF
+--cfg=CONFIG_POWER_RESET_SYSCON_POWEROFF="y"
+--cfg=CONFIG_DEBUG_KERNEL
+--cfg=CONFIG_DEBUG_KERNEL="y"
+--cfg=CONFIG_AS_HAS_OPTION_ARCH
+--cfg=CONFIG_AS_HAS_OPTION_ARCH="y"
+--cfg=CONFIG_COMPAT_BRK
+--cfg=CONFIG_COMPAT_BRK="y"
+--cfg=CONFIG_CLK_STARFIVE_JH7110_SYS
+--cfg=CONFIG_CLK_STARFIVE_JH7110_SYS="y"
+--cfg=CONFIG_LOCALVERSION=""
+--cfg=CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+--cfg=CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU="y"
+--cfg=CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+--cfg=CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK="y"
+--cfg=CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK
+--cfg=CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK="y"
+--cfg=CONFIG_MEMTEST
+--cfg=CONFIG_MEMTEST="y"
+--cfg=CONFIG_CMODEL_MEDANY
+--cfg=CONFIG_CMODEL_MEDANY="y"
+--cfg=CONFIG_CAN_RAW
+--cfg=CONFIG_CAN_RAW="m"
+--cfg=CONFIG_SYMBOLIC_ERRNAME
+--cfg=CONFIG_SYMBOLIC_ERRNAME="y"
+--cfg=CONFIG_CRYPTO
+--cfg=CONFIG_CRYPTO="y"
+--cfg=CONFIG_SCHED_DEBUG
+--cfg=CONFIG_SCHED_DEBUG="y"
+--cfg=CONFIG_NET_VENDOR_BROCADE
+--cfg=CONFIG_NET_VENDOR_BROCADE="y"
+--cfg=CONFIG_BTRFS_FS
+--cfg=CONFIG_BTRFS_FS="m"
+--cfg=CONFIG_DEFAULT_MMAP_MIN_ADDR="4096"
+--cfg=CONFIG_IP_NF_IPTABLES
+--cfg=CONFIG_IP_NF_IPTABLES="m"
+--cfg=CONFIG_CMDLINE=""
+--cfg=CONFIG_NET_VENDOR_QLOGIC
+--cfg=CONFIG_NET_VENDOR_QLOGIC="y"
+--cfg=CONFIG_USB_XHCI_HCD
+--cfg=CONFIG_USB_XHCI_HCD="y"
+--cfg=CONFIG_VIRTIO
+--cfg=CONFIG_VIRTIO="y"
+--cfg=CONFIG_SERIAL_SIFIVE
+--cfg=CONFIG_SERIAL_SIFIVE="y"
+--cfg=CONFIG_CFS_BANDWIDTH
+--cfg=CONFIG_CFS_BANDWIDTH="y"
+--cfg=CONFIG_NET_SELFTESTS
+--cfg=CONFIG_NET_SELFTESTS="y"
+--cfg=CONFIG_ARCH_RENESAS
+--cfg=CONFIG_ARCH_RENESAS="y"
+--cfg=CONFIG_DMA_VIRTUAL_CHANNELS
+--cfg=CONFIG_DMA_VIRTUAL_CHANNELS="m"
+--cfg=CONFIG_USB_ARCH_HAS_HCD
+--cfg=CONFIG_USB_ARCH_HAS_HCD="y"
+--cfg=CONFIG_GENERIC_IRQ_SHOW
+--cfg=CONFIG_GENERIC_IRQ_SHOW="y"
+--cfg=CONFIG_I2C_MV64XXX
+--cfg=CONFIG_I2C_MV64XXX="m"
+--cfg=CONFIG_NVMEM_SYSFS
+--cfg=CONFIG_NVMEM_SYSFS="y"
+--cfg=CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
+--cfg=CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE="y"
+--cfg=CONFIG_ARCH_HAS_ELF_RANDOMIZE
+--cfg=CONFIG_ARCH_HAS_ELF_RANDOMIZE="y"
+--cfg=CONFIG_9P_FS
+--cfg=CONFIG_9P_FS="y"
+--cfg=CONFIG_NETFS_SUPPORT
+--cfg=CONFIG_NETFS_SUPPORT="y"
+--cfg=CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
+--cfg=CONFIG_HAVE_FUNCTION_ARG_ACCESS_API="y"
+--cfg=CONFIG_PANIC_ON_OOPS_VALUE="0"
+--cfg=CONFIG_NET_VENDOR_SAMSUNG
+--cfg=CONFIG_NET_VENDOR_SAMSUNG="y"
+--cfg=CONFIG_NET_VENDOR_ADI
+--cfg=CONFIG_NET_VENDOR_ADI="y"
+--cfg=CONFIG_INITRAMFS_PRESERVE_MTIME
+--cfg=CONFIG_INITRAMFS_PRESERVE_MTIME="y"
+--cfg=CONFIG_SCSI_MOD
+--cfg=CONFIG_SCSI_MOD="y"
+--cfg=CONFIG_NET_VENDOR_MICREL
+--cfg=CONFIG_NET_VENDOR_MICREL="y"
+--cfg=CONFIG_CRYPTO_CRC32C
+--cfg=CONFIG_CRYPTO_CRC32C="y"
+--cfg=CONFIG_SERIAL_CORE
+--cfg=CONFIG_SERIAL_CORE="y"
+--cfg=CONFIG_USB_CONFIGFS_SERIAL
+--cfg=CONFIG_USB_CONFIGFS_SERIAL="y"
+--cfg=CONFIG_HAVE_KRETPROBES
+--cfg=CONFIG_HAVE_KRETPROBES="y"
+--cfg=CONFIG_ASSOCIATIVE_ARRAY
+--cfg=CONFIG_ASSOCIATIVE_ARRAY="y"
+--cfg=CONFIG_NF_DEFRAG_IPV6
+--cfg=CONFIG_NF_DEFRAG_IPV6="m"
+--cfg=CONFIG_MICREL_PHY
+--cfg=CONFIG_MICREL_PHY="y"
+--cfg=CONFIG_MODULE_COMPRESS_NONE
+--cfg=CONFIG_MODULE_COMPRESS_NONE="y"
+--cfg=CONFIG_CC_HAS_ZERO_CALL_USED_REGS
+--cfg=CONFIG_CC_HAS_ZERO_CALL_USED_REGS="y"
+--cfg=CONFIG_NFS_V4
+--cfg=CONFIG_NFS_V4="y"
+--cfg=CONFIG_RCU_NEED_SEGCBLIST
+--cfg=CONFIG_RCU_NEED_SEGCBLIST="y"
+--cfg=CONFIG_HAS_DMA
+--cfg=CONFIG_HAS_DMA="y"
+--cfg=CONFIG_NF_CT_PROTO_SCTP
+--cfg=CONFIG_NF_CT_PROTO_SCTP="y"
+--cfg=CONFIG_SCSI
+--cfg=CONFIG_SCSI="y"
+--cfg=CONFIG_FB_CFB_FILLRECT
+--cfg=CONFIG_FB_CFB_FILLRECT="y"
+--cfg=CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
+--cfg=CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST="y"
+--cfg=CONFIG_HID
+--cfg=CONFIG_HID="y"
+--cfg=CONFIG_DMA_DECLARE_COHERENT
+--cfg=CONFIG_DMA_DECLARE_COHERENT="y"
+--cfg=CONFIG_CGROUP_NET_PRIO
+--cfg=CONFIG_CGROUP_NET_PRIO="y"
+--cfg=CONFIG_FONT_8x16
+--cfg=CONFIG_FONT_8x16="y"
+--cfg=CONFIG_NET_VENDOR_MELLANOX
+--cfg=CONFIG_NET_VENDOR_MELLANOX="y"
+--cfg=CONFIG_VT_CONSOLE_SLEEP
+--cfg=CONFIG_VT_CONSOLE_SLEEP="y"
+--cfg=CONFIG_RCU_EQS_DEBUG
+--cfg=CONFIG_RCU_EQS_DEBUG="y"
+--cfg=CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
+--cfg=CONFIG_ARCH_HAS_CURRENT_STACK_POINTER="y"
+--cfg=CONFIG_JBD2
+--cfg=CONFIG_JBD2="y"
+--cfg=CONFIG_SPARSEMEM_VMEMMAP
+--cfg=CONFIG_SPARSEMEM_VMEMMAP="y"
+--cfg=CONFIG_MEMCG_KMEM
+--cfg=CONFIG_MEMCG_KMEM="y"
+--cfg=CONFIG_NET_VENDOR_MARVELL
+--cfg=CONFIG_NET_VENDOR_MARVELL="y"
+--cfg=CONFIG_PHYLIB
+--cfg=CONFIG_PHYLIB="y"
+--cfg=CONFIG_REGULATOR_GPIO
+--cfg=CONFIG_REGULATOR_GPIO="y"
+--cfg=CONFIG_NET_VENDOR_NVIDIA
+--cfg=CONFIG_NET_VENDOR_NVIDIA="y"
+--cfg=CONFIG_ARCH_RV64I
+--cfg=CONFIG_ARCH_RV64I="y"
+--cfg=CONFIG_IRQ_DOMAIN
+--cfg=CONFIG_IRQ_DOMAIN="y"
+--cfg=CONFIG_RISCV
+--cfg=CONFIG_RISCV="y"
+--cfg=CONFIG_VIRTIO_BALLOON
+--cfg=CONFIG_VIRTIO_BALLOON="y"
+--cfg=CONFIG_LSM_MMAP_MIN_ADDR="65536"
+--cfg=CONFIG_LOCALVERSION_AUTO
+--cfg=CONFIG_LOCALVERSION_AUTO="y"
+--cfg=CONFIG_INTEGRITY_AUDIT
+--cfg=CONFIG_INTEGRITY_AUDIT="y"
+--cfg=CONFIG_ARCH_HAS_DEBUG_VIRTUAL
+--cfg=CONFIG_ARCH_HAS_DEBUG_VIRTUAL="y"
+--cfg=CONFIG_CLK_STARFIVE_JH7100_AUDIO
+--cfg=CONFIG_CLK_STARFIVE_JH7100_AUDIO="m"
+--cfg=CONFIG_HAVE_ASM_MODVERSIONS
+--cfg=CONFIG_HAVE_ASM_MODVERSIONS="y"
+--cfg=CONFIG_IPC_NS
+--cfg=CONFIG_IPC_NS="y"
+--cfg=CONFIG_MISC_FILESYSTEMS
+--cfg=CONFIG_MISC_FILESYSTEMS="y"
+--cfg=CONFIG_ARCH_MMAP_RND_BITS_MIN="18"
+--cfg=CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY
+--cfg=CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY="y"
+--cfg=CONFIG_DECOMPRESS_BZIP2
+--cfg=CONFIG_DECOMPRESS_BZIP2="y"
+--cfg=CONFIG_PER_VMA_LOCK
+--cfg=CONFIG_PER_VMA_LOCK="y"
+--cfg=CONFIG_ARCH_SUPPORTS_UPROBES
+--cfg=CONFIG_ARCH_SUPPORTS_UPROBES="y"
+--cfg=CONFIG_NET_VENDOR_STMICRO
+--cfg=CONFIG_NET_VENDOR_STMICRO="y"
+--cfg=CONFIG_XZ_DEC_SPARC
+--cfg=CONFIG_XZ_DEC_SPARC="y"
+--cfg=CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
+--cfg=CONFIG_SECURITY_APPARMOR_EXPORT_BINARY="y"
+--cfg=CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
+--cfg=CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE="y"
+--cfg=CONFIG_OF_GPIO
+--cfg=CONFIG_OF_GPIO="y"
+--cfg=CONFIG_ARCH_SUPPORTS_HUGETLBFS
+--cfg=CONFIG_ARCH_SUPPORTS_HUGETLBFS="y"
+--cfg=CONFIG_SERIAL_MCTRL_GPIO
+--cfg=CONFIG_SERIAL_MCTRL_GPIO="y"
+--cfg=CONFIG_REALTEK_PHY
+--cfg=CONFIG_REALTEK_PHY="y"
+--cfg=CONFIG_DST_CACHE
+--cfg=CONFIG_DST_CACHE="y"
+--cfg=CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+--cfg=CONFIG_KVM_GENERIC_HARDWARE_ENABLING="y"
+--cfg=CONFIG_NF_REJECT_IPV6
+--cfg=CONFIG_NF_REJECT_IPV6="m"
+--cfg=CONFIG_RCU_CPU_STALL_TIMEOUT="21"
+--cfg=CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+--cfg=CONFIG_POSIX_CPU_TIMERS_TASK_WORK="y"
+--cfg=CONFIG_CHECKPOINT_RESTORE
+--cfg=CONFIG_CHECKPOINT_RESTORE="y"
+--cfg=CONFIG_SND_VERBOSE_PROCFS
+--cfg=CONFIG_SND_VERBOSE_PROCFS="y"
+--cfg=CONFIG_LLD_VERSION="0"
+--cfg=CONFIG_SECTION_MISMATCH_WARN_ONLY
+--cfg=CONFIG_SECTION_MISMATCH_WARN_ONLY="y"
+--cfg=CONFIG_NETFILTER_EGRESS
+--cfg=CONFIG_NETFILTER_EGRESS="y"
+--cfg=CONFIG_MDIO_DEVICE
+--cfg=CONFIG_MDIO_DEVICE="y"
+--cfg=CONFIG_TIMER_PROBE
+--cfg=CONFIG_TIMER_PROBE="y"
+--cfg=CONFIG_MODPROBE_PATH="/sbin/modprobe"
+--cfg=CONFIG_POWER_RESET
+--cfg=CONFIG_POWER_RESET="y"
+--cfg=CONFIG_DRM_DISPLAY_HELPER
+--cfg=CONFIG_DRM_DISPLAY_HELPER="m"
+--cfg=CONFIG_USB_RENESAS_USBHS_UDC
+--cfg=CONFIG_USB_RENESAS_USBHS_UDC="m"
+--cfg=CONFIG_IP6_NF_MATCH_IPV6HEADER
+--cfg=CONFIG_IP6_NF_MATCH_IPV6HEADER="m"
+--cfg=CONFIG_MACVLAN
+--cfg=CONFIG_MACVLAN="m"
+--cfg=CONFIG_PCIEASPM_DEFAULT
+--cfg=CONFIG_PCIEASPM_DEFAULT="y"
+--cfg=CONFIG_PROFILING
+--cfg=CONFIG_PROFILING="y"
+--cfg=CONFIG_INTERVAL_TREE
+--cfg=CONFIG_INTERVAL_TREE="y"
+--cfg=CONFIG_MMC_DW_PLTFM
+--cfg=CONFIG_MMC_DW_PLTFM="y"
+--cfg=CONFIG_NET_VENDOR_AMAZON
+--cfg=CONFIG_NET_VENDOR_AMAZON="y"
+--cfg=CONFIG_SPARSEMEM
+--cfg=CONFIG_SPARSEMEM="y"
+--cfg=CONFIG_BLK_MQ_STACKING
+--cfg=CONFIG_BLK_MQ_STACKING="y"
+--cfg=CONFIG_DRM_GEM_SHMEM_HELPER
+--cfg=CONFIG_DRM_GEM_SHMEM_HELPER="m"
+--cfg=CONFIG_WLAN_VENDOR_ATMEL
+--cfg=CONFIG_WLAN_VENDOR_ATMEL="y"
+--cfg=CONFIG_GRACE_PERIOD
+--cfg=CONFIG_GRACE_PERIOD="y"
+--cfg=CONFIG_NET_VENDOR_TEHUTI
+--cfg=CONFIG_NET_VENDOR_TEHUTI="y"
+--cfg=CONFIG_CRYPTO_MANAGER
+--cfg=CONFIG_CRYPTO_MANAGER="y"
+--cfg=CONFIG_EDAC_SUPPORT
+--cfg=CONFIG_EDAC_SUPPORT="y"
+--cfg=CONFIG_RT_MUTEXES
+--cfg=CONFIG_RT_MUTEXES="y"
+--cfg=CONFIG_LOCK_SPIN_ON_OWNER
+--cfg=CONFIG_LOCK_SPIN_ON_OWNER="y"
+--cfg=CONFIG_CC_NO_ARRAY_BOUNDS
+--cfg=CONFIG_CC_NO_ARRAY_BOUNDS="y"
+--cfg=CONFIG_DRM_I2C_SIL164
+--cfg=CONFIG_DRM_I2C_SIL164="m"
+--cfg=CONFIG_HUGETLBFS
+--cfg=CONFIG_HUGETLBFS="y"
+--cfg=CONFIG_SLAB_MERGE_DEFAULT
+--cfg=CONFIG_SLAB_MERGE_DEFAULT="y"
+--cfg=CONFIG_KERNFS
+--cfg=CONFIG_KERNFS="y"
+--cfg=CONFIG_I2C_ALGOBIT
+--cfg=CONFIG_I2C_ALGOBIT="m"
+--cfg=CONFIG_MMC_BLOCK
+--cfg=CONFIG_MMC_BLOCK="y"
+--cfg=CONFIG_ACPI_REDUCED_HARDWARE_ONLY
+--cfg=CONFIG_ACPI_REDUCED_HARDWARE_ONLY="y"
+--cfg=CONFIG_KVM
+--cfg=CONFIG_KVM="m"
+--cfg=CONFIG_PAGE_COUNTER
+--cfg=CONFIG_PAGE_COUNTER="y"
+--cfg=CONFIG_IOMMU_DEFAULT_DMA_STRICT
+--cfg=CONFIG_IOMMU_DEFAULT_DMA_STRICT="y"
+--cfg=CONFIG_CLK_RENESAS
+--cfg=CONFIG_CLK_RENESAS="y"
+--cfg=CONFIG_SND_SIMPLE_CARD
+--cfg=CONFIG_SND_SIMPLE_CARD="m"
+--cfg=CONFIG_SND_PCI
+--cfg=CONFIG_SND_PCI="y"
+--cfg=CONFIG_EXPERT
+--cfg=CONFIG_EXPERT="y"
+--cfg=CONFIG_WIRELESS
+--cfg=CONFIG_WIRELESS="y"
+--cfg=CONFIG_RPMSG_CTRL
+--cfg=CONFIG_RPMSG_CTRL="y"
+--cfg=CONFIG_ARCH_SOPHGO
+--cfg=CONFIG_ARCH_SOPHGO="y"
+--cfg=CONFIG_HZ_250
+--cfg=CONFIG_HZ_250="y"
+--cfg=CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS="64"
+--cfg=CONFIG_ARCH_HAS_STRICT_KERNEL_RWX
+--cfg=CONFIG_ARCH_HAS_STRICT_KERNEL_RWX="y"
+--cfg=CONFIG_LOCK_DEBUGGING_SUPPORT
+--cfg=CONFIG_LOCK_DEBUGGING_SUPPORT="y"
+--cfg=CONFIG_NF_LOG_SYSLOG
+--cfg=CONFIG_NF_LOG_SYSLOG="m"
+--cfg=CONFIG_DMA_SUN6I
+--cfg=CONFIG_DMA_SUN6I="m"
+--cfg=CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+--cfg=CONFIG_USB_CONFIGFS_MASS_STORAGE
+--cfg=CONFIG_USB_CONFIGFS_MASS_STORAGE="y"
+--cfg=CONFIG_RISCV_ISA_ZICBOZ
+--cfg=CONFIG_RISCV_ISA_ZICBOZ="y"
+--cfg=CONFIG_FRAME_WARN="2048"
+--cfg=CONFIG_NET_VENDOR_AGERE
+--cfg=CONFIG_NET_VENDOR_AGERE="y"
+--cfg=CONFIG_HID_GENERIC
+--cfg=CONFIG_HID_GENERIC="y"
+--cfg=CONFIG_ARCH_MMAP_RND_BITS="18"
+--cfg=CONFIG_GENERIC_HWEIGHT
+--cfg=CONFIG_GENERIC_HWEIGHT="y"
+--cfg=CONFIG_INITRAMFS_SOURCE=""
+--cfg=CONFIG_TASKS_TRACE_RCU
+--cfg=CONFIG_TASKS_TRACE_RCU="y"
+--cfg=CONFIG_CGROUPS
+--cfg=CONFIG_CGROUPS="y"
+--cfg=CONFIG_MMC
+--cfg=CONFIG_MMC="y"
+--cfg=CONFIG_LZO_COMPRESS
+--cfg=CONFIG_LZO_COMPRESS="m"
+--cfg=CONFIG_DAX
+--cfg=CONFIG_DAX="y"
+--cfg=CONFIG_VIRTIO_INPUT
+--cfg=CONFIG_VIRTIO_INPUT="y"
+--cfg=CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+--cfg=CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS="y"
+--cfg=CONFIG_CRYPTO_SEQIV
+--cfg=CONFIG_CRYPTO_SEQIV="m"
+--cfg=CONFIG_DRM_VIRTIO_GPU_KMS
+--cfg=CONFIG_DRM_VIRTIO_GPU_KMS="y"
+--cfg=CONFIG_HAVE_GCC_PLUGINS
+--cfg=CONFIG_HAVE_GCC_PLUGINS="y"
+--cfg=CONFIG_STACKTRACE
+--cfg=CONFIG_STACKTRACE="y"
+--cfg=CONFIG_HAVE_PCI
+--cfg=CONFIG_HAVE_PCI="y"
+--cfg=CONFIG_EXTCON
+--cfg=CONFIG_EXTCON="y"
+--cfg=CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
+--cfg=CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC="y"
+--cfg=CONFIG_HAS_IOPORT
+--cfg=CONFIG_HAS_IOPORT="y"
+--cfg=CONFIG_CRYPTO_DRBG
+--cfg=CONFIG_CRYPTO_DRBG="m"
+--cfg=CONFIG_OF_EARLY_FLATTREE
+--cfg=CONFIG_OF_EARLY_FLATTREE="y"
+--cfg=CONFIG_WLAN_VENDOR_ADMTEK
+--cfg=CONFIG_WLAN_VENDOR_ADMTEK="y"
+--cfg=CONFIG_CGROUP_CPUACCT
+--cfg=CONFIG_CGROUP_CPUACCT="y"
+--cfg=CONFIG_CAN_BCM
+--cfg=CONFIG_CAN_BCM="m"
+--cfg=CONFIG_HAS_IOPORT_MAP
+--cfg=CONFIG_HAS_IOPORT_MAP="y"
+--cfg=CONFIG_VIRTIO_PCI_LIB
+--cfg=CONFIG_VIRTIO_PCI_LIB="y"
+--cfg=CONFIG_NET_VENDOR_VIA
+--cfg=CONFIG_NET_VENDOR_VIA="y"
+--cfg=CONFIG_SERIAL_SH_SCI_CONSOLE
+--cfg=CONFIG_SERIAL_SH_SCI_CONSOLE="y"
+--cfg=CONFIG_HZ="250"
+--cfg=CONFIG_I2C_HELPER_AUTO
+--cfg=CONFIG_I2C_HELPER_AUTO="y"
+--cfg=CONFIG_PINCTRL_STARFIVE_JH7110_SYS
+--cfg=CONFIG_PINCTRL_STARFIVE_JH7110_SYS="y"
+--cfg=CONFIG_KVM_XFER_TO_GUEST_WORK
+--cfg=CONFIG_KVM_XFER_TO_GUEST_WORK="y"
+--cfg=CONFIG_SERIAL_8250_PERICOM
+--cfg=CONFIG_SERIAL_8250_PERICOM="y"
+--cfg=CONFIG_SIFIVE_PLIC
+--cfg=CONFIG_SIFIVE_PLIC="y"
+--cfg=CONFIG_SERIAL_8250_NR_UARTS="4"
+--cfg=CONFIG_ARCH_HAS_STRICT_MODULE_RWX
+--cfg=CONFIG_ARCH_HAS_STRICT_MODULE_RWX="y"
+--cfg=CONFIG_CC_IS_GCC
+--cfg=CONFIG_CC_IS_GCC="y"
+--cfg=CONFIG_NET_EGRESS
+--cfg=CONFIG_NET_EGRESS="y"
+--cfg=CONFIG_NET_VENDOR_ARC
+--cfg=CONFIG_NET_VENDOR_ARC="y"
+--cfg=CONFIG_CRYPTO_ENGINE
+--cfg=CONFIG_CRYPTO_ENGINE="y"
+--cfg=CONFIG_DRM_I2C_CH7006
+--cfg=CONFIG_DRM_I2C_CH7006="m"
+--cfg=CONFIG_HAVE_PERF_USER_STACK_DUMP
+--cfg=CONFIG_HAVE_PERF_USER_STACK_DUMP="y"
+--cfg=CONFIG_CGROUP_PERF
+--cfg=CONFIG_CGROUP_PERF="y"
+--cfg=CONFIG_NLATTR
+--cfg=CONFIG_NLATTR="y"
+--cfg=CONFIG_TCP_CONG_CUBIC
+--cfg=CONFIG_TCP_CONG_CUBIC="y"
+--cfg=CONFIG_NR_CPUS="64"
+--cfg=CONFIG_SUSPEND_FREEZER
+--cfg=CONFIG_SUSPEND_FREEZER="y"
+--cfg=CONFIG_MMC_SDHCI
+--cfg=CONFIG_MMC_SDHCI="y"
+--cfg=CONFIG_SND_SUPPORT_OLD_API
+--cfg=CONFIG_SND_SUPPORT_OLD_API="y"
+--cfg=CONFIG_MOUSE_PS2_TRACKPOINT
+--cfg=CONFIG_MOUSE_PS2_TRACKPOINT="y"
+--cfg=CONFIG_DRM_NOUVEAU_BACKLIGHT
+--cfg=CONFIG_DRM_NOUVEAU_BACKLIGHT="y"
+--cfg=CONFIG_SYSFS
+--cfg=CONFIG_SYSFS="y"
+--cfg=CONFIG_USB_DEFAULT_PERSIST
+--cfg=CONFIG_USB_DEFAULT_PERSIST="y"
+--cfg=CONFIG_AS_HAS_NON_CONST_LEB128
+--cfg=CONFIG_AS_HAS_NON_CONST_LEB128="y"
+--cfg=CONFIG_DRM_PANEL_BRIDGE
+--cfg=CONFIG_DRM_PANEL_BRIDGE="y"
+--cfg=CONFIG_USB_EHCI_HCD_PLATFORM
+--cfg=CONFIG_USB_EHCI_HCD_PLATFORM="y"
+--cfg=CONFIG_BLK_DEV_BSG_COMMON
+--cfg=CONFIG_BLK_DEV_BSG_COMMON="y"
+--cfg=CONFIG_ASN1
+--cfg=CONFIG_ASN1="y"
+--cfg=CONFIG_CLK_SIFIVE_PRCI
+--cfg=CONFIG_CLK_SIFIVE_PRCI="y"
+--cfg=CONFIG_CRYPTO_DEV_VIRTIO
+--cfg=CONFIG_CRYPTO_DEV_VIRTIO="y"
+--cfg=CONFIG_SPI_RSPI
+--cfg=CONFIG_SPI_RSPI="m"
+--cfg=CONFIG_XZ_DEC_ARM
+--cfg=CONFIG_XZ_DEC_ARM="y"
+--cfg=CONFIG_USB_CONFIGFS_EEM
+--cfg=CONFIG_USB_CONFIGFS_EEM="y"
+--cfg=CONFIG_PTP_1588_CLOCK_OPTIONAL
+--cfg=CONFIG_PTP_1588_CLOCK_OPTIONAL="y"
+--cfg=CONFIG_FB_DMAMEM_HELPERS
+--cfg=CONFIG_FB_DMAMEM_HELPERS="y"
+--cfg=CONFIG_FB_SYS_FOPS
+--cfg=CONFIG_FB_SYS_FOPS="y"
+--cfg=CONFIG_HAVE_SYSCALL_TRACEPOINTS
+--cfg=CONFIG_HAVE_SYSCALL_TRACEPOINTS="y"
+--cfg=CONFIG_HAVE_ARCH_HUGE_VMALLOC
+--cfg=CONFIG_HAVE_ARCH_HUGE_VMALLOC="y"
+--cfg=CONFIG_SERIAL_SH_SCI_DMA
+--cfg=CONFIG_SERIAL_SH_SCI_DMA="y"
+--cfg=CONFIG_ACPI_BATTERY
+--cfg=CONFIG_ACPI_BATTERY="y"
+--cfg=CONFIG_IO_WQ
+--cfg=CONFIG_IO_WQ="y"
+--cfg=CONFIG_DECOMPRESS_ZSTD
+--cfg=CONFIG_DECOMPRESS_ZSTD="y"
+--cfg=CONFIG_FB
+--cfg=CONFIG_FB="y"
+--cfg=CONFIG_BLK_MQ_VIRTIO
+--cfg=CONFIG_BLK_MQ_VIRTIO="y"
+--cfg=CONFIG_I2C_COMPAT
+--cfg=CONFIG_I2C_COMPAT="y"
+--cfg=CONFIG_DRM_SCHED
+--cfg=CONFIG_DRM_SCHED="m"
+--cfg=CONFIG_WLAN_VENDOR_ZYDAS
+--cfg=CONFIG_WLAN_VENDOR_ZYDAS="y"
+--cfg=CONFIG_SPARSEMEM_VMEMMAP_ENABLE
+--cfg=CONFIG_SPARSEMEM_VMEMMAP_ENABLE="y"
+--cfg=CONFIG_DRM_SUN4I
+--cfg=CONFIG_DRM_SUN4I="m"
+--cfg=CONFIG_IPVLAN
+--cfg=CONFIG_IPVLAN="m"
+--cfg=CONFIG_VIRTUALIZATION
+--cfg=CONFIG_VIRTUALIZATION="y"
+--cfg=CONFIG_ND_CLAIM
+--cfg=CONFIG_ND_CLAIM="y"
+--cfg=CONFIG_MSDOS_PARTITION
+--cfg=CONFIG_MSDOS_PARTITION="y"
+--cfg=CONFIG_RTC_I2C_AND_SPI
+--cfg=CONFIG_RTC_I2C_AND_SPI="m"
+--cfg=CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK
+--cfg=CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK="y"
+--cfg=CONFIG_LEGACY_DIRECT_IO
+--cfg=CONFIG_LEGACY_DIRECT_IO="y"
+--cfg=CONFIG_THERMAL
+--cfg=CONFIG_THERMAL="y"
+--cfg=CONFIG_SYNC_FILE
+--cfg=CONFIG_SYNC_FILE="y"
+--cfg=CONFIG_USB_XHCI_PCI
+--cfg=CONFIG_USB_XHCI_PCI="y"
+--cfg=CONFIG_IP_PNP_RARP
+--cfg=CONFIG_IP_PNP_RARP="y"
+--cfg=CONFIG_DEBUG_TIMEKEEPING
+--cfg=CONFIG_DEBUG_TIMEKEEPING="y"
+--cfg=CONFIG_VETH
+--cfg=CONFIG_VETH="m"
+--cfg=CONFIG_NET_VENDOR_3COM
+--cfg=CONFIG_NET_VENDOR_3COM="y"
+--cfg=CONFIG_STACKPROTECTOR
+--cfg=CONFIG_STACKPROTECTOR="y"
+--cfg=CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK
+--cfg=CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK="y"
+--cfg=CONFIG_MMC_SDHCI_CADENCE
+--cfg=CONFIG_MMC_SDHCI_CADENCE="y"
+--cfg=CONFIG_HAVE_ARCH_KGDB
+--cfg=CONFIG_HAVE_ARCH_KGDB="y"
+--cfg=CONFIG_BLK_DEBUG_FS
+--cfg=CONFIG_BLK_DEBUG_FS="y"
+--cfg=CONFIG_NET_VENDOR_INTEL
+--cfg=CONFIG_NET_VENDOR_INTEL="y"
+--cfg=CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
+--cfg=CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK="y"
+--cfg=CONFIG_RPS
+--cfg=CONFIG_RPS="y"
+--cfg=CONFIG_SERIAL_8250_EXAR
+--cfg=CONFIG_SERIAL_8250_EXAR="y"
+--cfg=CONFIG_PROC_PID_CPUSET
+--cfg=CONFIG_PROC_PID_CPUSET="y"
+--cfg=CONFIG_PM_GENERIC_DOMAINS
+--cfg=CONFIG_PM_GENERIC_DOMAINS="y"
+--cfg=CONFIG_LEGACY_PTY_COUNT="256"
+--cfg=CONFIG_GENERIC_CSUM
+--cfg=CONFIG_GENERIC_CSUM="y"
+--cfg=CONFIG_MTD_MAP_BANK_WIDTH_2
+--cfg=CONFIG_MTD_MAP_BANK_WIDTH_2="y"
+--cfg=CONFIG_GENERIC_IDLE_POLL_SETUP
+--cfg=CONFIG_GENERIC_IDLE_POLL_SETUP="y"
+--cfg=CONFIG_RESET_SIMPLE
+--cfg=CONFIG_RESET_SIMPLE="y"
+--cfg=CONFIG_MDIO_BUS_MUX
+--cfg=CONFIG_MDIO_BUS_MUX="m"
+--cfg=CONFIG_ZISOFS
+--cfg=CONFIG_ZISOFS="y"
+--cfg=CONFIG_WLAN_VENDOR_MEDIATEK
+--cfg=CONFIG_WLAN_VENDOR_MEDIATEK="y"
+--cfg=CONFIG_IP_MULTICAST
+--cfg=CONFIG_IP_MULTICAST="y"
+--cfg=CONFIG_NET_VENDOR_CISCO
+--cfg=CONFIG_NET_VENDOR_CISCO="y"
+--cfg=CONFIG_GENERIC_IRQ_IPI_MUX
+--cfg=CONFIG_GENERIC_IRQ_IPI_MUX="y"
+--cfg=CONFIG_TICK_ONESHOT
+--cfg=CONFIG_TICK_ONESHOT="y"
+--cfg=CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
+--cfg=CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL="y"
+--cfg=CONFIG_CRYPTO_CTR
+--cfg=CONFIG_CRYPTO_CTR="m"
+--cfg=CONFIG_XARRAY_MULTI
+--cfg=CONFIG_XARRAY_MULTI="y"
+--cfg=CONFIG_LOCK_MM_AND_FIND_VMA
+--cfg=CONFIG_LOCK_MM_AND_FIND_VMA="y"
+--cfg=CONFIG_SUNXI_WATCHDOG
+--cfg=CONFIG_SUNXI_WATCHDOG="y"
+--cfg=CONFIG_HW_RANDOM
+--cfg=CONFIG_HW_RANDOM="y"
+--cfg=CONFIG_MUTEX_SPIN_ON_OWNER
+--cfg=CONFIG_MUTEX_SPIN_ON_OWNER="y"
+--cfg=CONFIG_DEBUG_VM_IRQSOFF
+--cfg=CONFIG_DEBUG_VM_IRQSOFF="y"
+--cfg=CONFIG_DYNAMIC_SIGFRAME
+--cfg=CONFIG_DYNAMIC_SIGFRAME="y"
+--cfg=CONFIG_CGROUP_NET_CLASSID
+--cfg=CONFIG_CGROUP_NET_CLASSID="y"
+--cfg=CONFIG_RISCV_SBI_CPUIDLE
+--cfg=CONFIG_RISCV_SBI_CPUIDLE="y"
+--cfg=CONFIG_HAVE_FUNCTION_GRAPH_TRACER
+--cfg=CONFIG_HAVE_FUNCTION_GRAPH_TRACER="y"
+--cfg=CONFIG_BUFFER_HEAD
+--cfg=CONFIG_BUFFER_HEAD="y"
+--cfg=CONFIG_OF_MDIO
+--cfg=CONFIG_OF_MDIO="y"
+--cfg=CONFIG_CRYPTO_BLAKE2B
+--cfg=CONFIG_CRYPTO_BLAKE2B="m"
+--cfg=CONFIG_TREE_SRCU
+--cfg=CONFIG_TREE_SRCU="y"
+--cfg=CONFIG_CRYPTO_NULL2
+--cfg=CONFIG_CRYPTO_NULL2="m"
+--cfg=CONFIG_ARCH_HAS_MMIOWB
+--cfg=CONFIG_ARCH_HAS_MMIOWB="y"
+--cfg=CONFIG_ACPI_MDIO
+--cfg=CONFIG_ACPI_MDIO="y"
+--cfg=CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+--cfg=CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP="y"
+--cfg=CONFIG_SERIAL_8250_DMA
+--cfg=CONFIG_SERIAL_8250_DMA="y"
+--cfg=CONFIG_BASE_SMALL="0"
+--cfg=CONFIG_SECURITY_SELINUX_AVC_STATS
+--cfg=CONFIG_SECURITY_SELINUX_AVC_STATS="y"
+--cfg=CONFIG_VIDEO_CMDLINE
+--cfg=CONFIG_VIDEO_CMDLINE="y"
+--cfg=CONFIG_COMPACTION
+--cfg=CONFIG_COMPACTION="y"
+--cfg=CONFIG_NFS_V2
+--cfg=CONFIG_NFS_V2="y"
+--cfg=CONFIG_BLK_CGROUP_PUNT_BIO
+--cfg=CONFIG_BLK_CGROUP_PUNT_BIO="y"
+--cfg=CONFIG_PROC_FS
+--cfg=CONFIG_PROC_FS="y"
+--cfg=CONFIG_MTD_BLOCK
+--cfg=CONFIG_MTD_BLOCK="y"
+--cfg=CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE
+--cfg=CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE="y"
+--cfg=CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+--cfg=CONFIG_GENERIC_BUG_RELATIVE_POINTERS="y"
+--cfg=CONFIG_VIRTIO_MMIO
+--cfg=CONFIG_VIRTIO_MMIO="y"
+--cfg=CONFIG_NET_VENDOR_ROCKER
+--cfg=CONFIG_NET_VENDOR_ROCKER="y"
+--cfg=CONFIG_SCSI_LOWLEVEL
+--cfg=CONFIG_SCSI_LOWLEVEL="y"
+--cfg=CONFIG_RISCV_PMU_SBI
+--cfg=CONFIG_RISCV_PMU_SBI="y"
+--cfg=CONFIG_MEMFD_CREATE
+--cfg=CONFIG_MEMFD_CREATE="y"
+--cfg=CONFIG_IRQ_FORCED_THREADING
+--cfg=CONFIG_IRQ_FORCED_THREADING="y"
+--cfg=CONFIG_DRM_FBDEV_EMULATION
+--cfg=CONFIG_DRM_FBDEV_EMULATION="y"
+--cfg=CONFIG_SND
+--cfg=CONFIG_SND="y"
+--cfg=CONFIG_CLK_STARFIVE_JH7110_VOUT
+--cfg=CONFIG_CLK_STARFIVE_JH7110_VOUT="m"
+--cfg=CONFIG_USB_F_OBEX
+--cfg=CONFIG_USB_F_OBEX="m"
+--cfg=CONFIG_PHY_SUN6I_MIPI_DPHY
+--cfg=CONFIG_PHY_SUN6I_MIPI_DPHY="m"
+--cfg=CONFIG_PCIE_DW
+--cfg=CONFIG_PCIE_DW="y"
+--cfg=CONFIG_LD_ORPHAN_WARN
+--cfg=CONFIG_LD_ORPHAN_WARN="y"
+--cfg=CONFIG_NET_VENDOR_NATSEMI
+--cfg=CONFIG_NET_VENDOR_NATSEMI="y"
+--cfg=CONFIG_USB_MUSB_DUAL_ROLE
+--cfg=CONFIG_USB_MUSB_DUAL_ROLE="y"
+--cfg=CONFIG_VIRTIO_PCI_LIB_LEGACY
+--cfg=CONFIG_VIRTIO_PCI_LIB_LEGACY="y"
+--cfg=CONFIG_USB_F_MASS_STORAGE
+--cfg=CONFIG_USB_F_MASS_STORAGE="m"
+--cfg=CONFIG_IKCONFIG
+--cfg=CONFIG_IKCONFIG="y"
+--cfg=CONFIG_NET_VENDOR_GOOGLE
+--cfg=CONFIG_NET_VENDOR_GOOGLE="y"
+--cfg=CONFIG_DEBUG_PLIST
+--cfg=CONFIG_DEBUG_PLIST="y"
+--cfg=CONFIG_GENERIC_IRQ_MIGRATION
+--cfg=CONFIG_GENERIC_IRQ_MIGRATION="y"
+--cfg=CONFIG_NET_VENDOR_NETRONOME
+--cfg=CONFIG_NET_VENDOR_NETRONOME="y"
+--cfg=CONFIG_DEBUG_LIST
+--cfg=CONFIG_DEBUG_LIST="y"
+--cfg=CONFIG_NFS_USE_KERNEL_DNS
+--cfg=CONFIG_NFS_USE_KERNEL_DNS="y"
+--cfg=CONFIG_ARCH_HAS_FORTIFY_SOURCE
+--cfg=CONFIG_ARCH_HAS_FORTIFY_SOURCE="y"
+--cfg=CONFIG_GCC_VERSION="130200"
+--cfg=CONFIG_CRYPTO_LIB_POLY1305_RSIZE="1"
+--cfg=CONFIG_SYSCTL
+--cfg=CONFIG_SYSCTL="y"
+--cfg=CONFIG_CC_CAN_LINK_STATIC
+--cfg=CONFIG_CC_CAN_LINK_STATIC="y"
+--cfg=CONFIG_ARCH_HAS_GCOV_PROFILE_ALL
+--cfg=CONFIG_ARCH_HAS_GCOV_PROFILE_ALL="y"
+--cfg=CONFIG_BRIDGE_IGMP_SNOOPING
+--cfg=CONFIG_BRIDGE_IGMP_SNOOPING="y"
+--cfg=CONFIG_PHYS_ADDR_T_64BIT
+--cfg=CONFIG_PHYS_ADDR_T_64BIT="y"
+--cfg=CONFIG_THREAD_INFO_IN_TASK
+--cfg=CONFIG_THREAD_INFO_IN_TASK="y"
+--cfg=CONFIG_NET_VENDOR_LITEX
+--cfg=CONFIG_NET_VENDOR_LITEX="y"
+--cfg=CONFIG_GENERIC_MSI_IRQ
+--cfg=CONFIG_GENERIC_MSI_IRQ="y"
+--cfg=CONFIG_HAVE_ARCH_TRACEHOOK
+--cfg=CONFIG_HAVE_ARCH_TRACEHOOK="y"
+--cfg=CONFIG_RPMSG_CHAR
+--cfg=CONFIG_RPMSG_CHAR="y"
+--cfg=CONFIG_ARCH_STARFIVE
+--cfg=CONFIG_ARCH_STARFIVE="y"
+--cfg=CONFIG_PCI_DOMAINS_GENERIC
+--cfg=CONFIG_PCI_DOMAINS_GENERIC="y"
+--cfg=CONFIG_DRM_FBDEV_OVERALLOC="100"
+--cfg=CONFIG_XFRM_USER
+--cfg=CONFIG_XFRM_USER="m"
+--cfg=CONFIG_CPUFREQ_DT_PLATDEV
+--cfg=CONFIG_CPUFREQ_DT_PLATDEV="y"
+--cfg=CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW
+--cfg=CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW="y"
+--cfg=CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+--cfg=CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE="y"
+--cfg=CONFIG_NET_NS
+--cfg=CONFIG_NET_NS="y"
+--cfg=CONFIG_HAVE_PERF_EVENTS
+--cfg=CONFIG_HAVE_PERF_EVENTS="y"
+--cfg=CONFIG_BTT
+--cfg=CONFIG_BTT="y"
+--cfg=CONFIG_ATA_SFF
+--cfg=CONFIG_ATA_SFF="y"
+--cfg=CONFIG_NET_VENDOR_SOLARFLARE
+--cfg=CONFIG_NET_VENDOR_SOLARFLARE="y"
+--cfg=CONFIG_CAN_GW
+--cfg=CONFIG_CAN_GW="m"
+--cfg=CONFIG_STMMAC_ETH
+--cfg=CONFIG_STMMAC_ETH="m"
+--cfg=CONFIG_BLK_DEV_PMEM
+--cfg=CONFIG_BLK_DEV_PMEM="y"
+--cfg=CONFIG_DEBUG_MEMORY_INIT
+--cfg=CONFIG_DEBUG_MEMORY_INIT="y"
+--cfg=CONFIG_XFRM_ESP
+--cfg=CONFIG_XFRM_ESP="m"
+--cfg=CONFIG_AUDIT
+--cfg=CONFIG_AUDIT="y"
+--cfg=CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE
+--cfg=CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE="y"
+--cfg=CONFIG_HAVE_RETHOOK
+--cfg=CONFIG_HAVE_RETHOOK="y"
+--cfg=CONFIG_NET_9P_FD
+--cfg=CONFIG_NET_9P_FD="y"
+--cfg=CONFIG_LTO_NONE
+--cfg=CONFIG_LTO_NONE="y"
+--cfg=CONFIG_PCIEASPM
+--cfg=CONFIG_PCIEASPM="y"
+--cfg=CONFIG_DEBUG_FS_ALLOW_ALL
+--cfg=CONFIG_DEBUG_FS_ALLOW_ALL="y"
+--cfg=CONFIG_FB_DEFERRED_IO
+--cfg=CONFIG_FB_DEFERRED_IO="y"
+--cfg=CONFIG_SATA_AHCI
+--cfg=CONFIG_SATA_AHCI="y"
+--cfg=CONFIG_SECURITY
+--cfg=CONFIG_SECURITY="y"
+--cfg=CONFIG_MAX_SKB_FRAGS="17"
+--cfg=CONFIG_PORTABLE
+--cfg=CONFIG_PORTABLE="y"
+--cfg=CONFIG_SND_TIMER
+--cfg=CONFIG_SND_TIMER="y"
+--cfg=CONFIG_KVM_MMIO
+--cfg=CONFIG_KVM_MMIO="y"
+--cfg=CONFIG_CLK_SIFIVE
+--cfg=CONFIG_CLK_SIFIVE="y"
+--cfg=CONFIG_USB_EHCI_TT_NEWSCHED
+--cfg=CONFIG_USB_EHCI_TT_NEWSCHED="y"
+--cfg=CONFIG_FAT_DEFAULT_CODEPAGE="437"
+--cfg=CONFIG_BLK_DEV
+--cfg=CONFIG_BLK_DEV="y"
+--cfg=CONFIG_BRIDGE_NETFILTER
+--cfg=CONFIG_BRIDGE_NETFILTER="m"
+--cfg=CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+--cfg=CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI="y"
+--cfg=CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
+--cfg=CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT="y"
+--cfg=CONFIG_OF_FLATTREE
+--cfg=CONFIG_OF_FLATTREE="y"
+--cfg=CONFIG_HAVE_ARCH_KFENCE
+--cfg=CONFIG_HAVE_ARCH_KFENCE="y"
+--cfg=CONFIG_WLAN_VENDOR_SILABS
+--cfg=CONFIG_WLAN_VENDOR_SILABS="y"
+--cfg=CONFIG_IOMMU_API
+--cfg=CONFIG_IOMMU_API="y"
+--cfg=CONFIG_RISCV_ISA_FALLBACK
+--cfg=CONFIG_RISCV_ISA_FALLBACK="y"
+--cfg=CONFIG_GPIO_GENERIC
+--cfg=CONFIG_GPIO_GENERIC="y"
+--cfg=CONFIG_TRACING_SUPPORT
+--cfg=CONFIG_TRACING_SUPPORT="y"
+--cfg=CONFIG_UNIX98_PTYS
+--cfg=CONFIG_UNIX98_PTYS="y"
+--cfg=CONFIG_DEBUG_VM_PGFLAGS
+--cfg=CONFIG_DEBUG_VM_PGFLAGS="y"
+--cfg=CONFIG_NET_RX_BUSY_POLL
+--cfg=CONFIG_NET_RX_BUSY_POLL="y"
+--cfg=CONFIG_NET_VENDOR_SOCIONEXT
+--cfg=CONFIG_NET_VENDOR_SOCIONEXT="y"
+--cfg=CONFIG_SECURITY_SELINUX
+--cfg=CONFIG_SECURITY_SELINUX="y"
+--cfg=CONFIG_ZONE_DMA32
+--cfg=CONFIG_ZONE_DMA32="y"
+--cfg=CONFIG_NET_SCHED
+--cfg=CONFIG_NET_SCHED="y"
+--cfg=CONFIG_ARCH_SUPPORTS_KEXEC
+--cfg=CONFIG_ARCH_SUPPORTS_KEXEC="y"
+--cfg=CONFIG_DRM_PANEL
+--cfg=CONFIG_DRM_PANEL="y"
+--cfg=CONFIG_PRINTK_TIME
+--cfg=CONFIG_PRINTK_TIME="y"
+--cfg=CONFIG_ARCH_VIRT
+--cfg=CONFIG_ARCH_VIRT="y"
+--cfg=CONFIG_INPUT_MOUSEDEV_SCREEN_Y="768"
+--cfg=CONFIG_TASKS_RCU_GENERIC
+--cfg=CONFIG_TASKS_RCU_GENERIC="y"
+--cfg=CONFIG_SECCOMP_FILTER
+--cfg=CONFIG_SECCOMP_FILTER="y"
+--cfg=CONFIG_IRQCHIP
+--cfg=CONFIG_IRQCHIP="y"
+--cfg=CONFIG_INET_DIAG
+--cfg=CONFIG_INET_DIAG="y"
+--cfg=CONFIG_CRYPTO_GHASH
+--cfg=CONFIG_CRYPTO_GHASH="m"
+--cfg=CONFIG_GENERIC_ENTRY
+--cfg=CONFIG_GENERIC_ENTRY="y"
+--cfg=CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW
+--cfg=CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW="y"
+--cfg=CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS="9"
+--cfg=CONFIG_NF_NAT_FTP
+--cfg=CONFIG_NF_NAT_FTP="m"
+--cfg=CONFIG_NF_CT_PROTO_UDPLITE
+--cfg=CONFIG_NF_CT_PROTO_UDPLITE="y"
+--cfg=CONFIG_IKCONFIG_PROC
+--cfg=CONFIG_IKCONFIG_PROC="y"
+--cfg=CONFIG_ELF_CORE
+--cfg=CONFIG_ELF_CORE="y"
+--cfg=CONFIG_PCI_HOST_COMMON
+--cfg=CONFIG_PCI_HOST_COMMON="y"
+--cfg=CONFIG_HAVE_CONTEXT_TRACKING_USER
+--cfg=CONFIG_HAVE_CONTEXT_TRACKING_USER="y"
+--cfg=CONFIG_MODULE_SECTIONS
+--cfg=CONFIG_MODULE_SECTIONS="y"
+--cfg=CONFIG_USB_SUPPORT
+--cfg=CONFIG_USB_SUPPORT="y"
+--cfg=CONFIG_HAVE_ARCH_KGDB_QXFER_PKT
+--cfg=CONFIG_HAVE_ARCH_KGDB_QXFER_PKT="y"
+--cfg=CONFIG_WLAN_VENDOR_ST
+--cfg=CONFIG_WLAN_VENDOR_ST="y"
+--cfg=CONFIG_PCP_BATCH_SCALE_MAX="5"
+--cfg=CONFIG_BLK_DEV_NVME
+--cfg=CONFIG_BLK_DEV_NVME="m"
+--cfg=CONFIG_SOC_BUS
+--cfg=CONFIG_SOC_BUS="y"
+--cfg=CONFIG_NET_VENDOR_SIS
+--cfg=CONFIG_NET_VENDOR_SIS="y"
+--cfg=CONFIG_HAVE_64BIT_ALIGNED_ACCESS
+--cfg=CONFIG_HAVE_64BIT_ALIGNED_ACCESS="y"
+--cfg=CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE
+--cfg=CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE="y"
+--cfg=CONFIG_PAGE_POOL
+--cfg=CONFIG_PAGE_POOL="y"
+--cfg=CONFIG_SERIAL_8250_16550A_VARIANTS
+--cfg=CONFIG_SERIAL_8250_16550A_VARIANTS="y"
+--cfg=CONFIG_INIT_STACK_ALL_ZERO
+--cfg=CONFIG_INIT_STACK_ALL_ZERO="y"
+--cfg=CONFIG_VT_CONSOLE
+--cfg=CONFIG_VT_CONSOLE="y"
+--cfg=CONFIG_HW_RANDOM_VIRTIO
+--cfg=CONFIG_HW_RANDOM_VIRTIO="y"
+--cfg=CONFIG_AS_HAS_INSN
+--cfg=CONFIG_AS_HAS_INSN="y"
+--cfg=CONFIG_MQ_IOSCHED_KYBER
+--cfg=CONFIG_MQ_IOSCHED_KYBER="y"
+--cfg=CONFIG_AS_VERSION="24200"
+--cfg=CONFIG_CC_HAS_INT128
+--cfg=CONFIG_CC_HAS_INT128="y"
+--cfg=CONFIG_EFI_EARLYCON
+--cfg=CONFIG_EFI_EARLYCON="y"
+--cfg=CONFIG_WLAN_VENDOR_MARVELL
+--cfg=CONFIG_WLAN_VENDOR_MARVELL="y"
+--cfg=CONFIG_NOP_USB_XCEIV
+--cfg=CONFIG_NOP_USB_XCEIV="m"
+--cfg=CONFIG_NET_SOCK_MSG
+--cfg=CONFIG_NET_SOCK_MSG="y"
+--cfg=CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+--cfg=CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE="y"
+--cfg=CONFIG_SERIAL_8250_DWLIB
+--cfg=CONFIG_SERIAL_8250_DWLIB="y"
+--cfg=CONFIG_USB_F_NCM
+--cfg=CONFIG_USB_F_NCM="m"
+--cfg=CONFIG_ARCH_MMAP_RND_COMPAT_BITS="8"
+--cfg=CONFIG_DRM
+--cfg=CONFIG_DRM="m"
+--cfg=CONFIG_PINCTRL_STARFIVE_JH7110
+--cfg=CONFIG_PINCTRL_STARFIVE_JH7110="y"
+--cfg=CONFIG_POSIX_MQUEUE_SYSCTL
+--cfg=CONFIG_POSIX_MQUEUE_SYSCTL="y"
+--cfg=CONFIG_VHOST_MENU
+--cfg=CONFIG_VHOST_MENU="y"
+--cfg=CONFIG_DRM_EXEC
+--cfg=CONFIG_DRM_EXEC="m"
+--cfg=CONFIG_DEBUG_MISC
+--cfg=CONFIG_DEBUG_MISC="y"
+--cfg=CONFIG_FB_CFB_COPYAREA
+--cfg=CONFIG_FB_CFB_COPYAREA="y"
+--cfg=CONFIG_USB_F_ECM
+--cfg=CONFIG_USB_F_ECM="m"
+--cfg=CONFIG_HAVE_KVM_IRQCHIP
+--cfg=CONFIG_HAVE_KVM_IRQCHIP="y"
+--cfg=CONFIG_NET_VENDOR_MICROSEMI
+--cfg=CONFIG_NET_VENDOR_MICROSEMI="y"
+--cfg=CONFIG_BALLOON_COMPACTION
+--cfg=CONFIG_BALLOON_COMPACTION="y"
+--cfg=CONFIG_ARCH_OPTIONAL_KERNEL_RWX
+--cfg=CONFIG_ARCH_OPTIONAL_KERNEL_RWX="y"
+--cfg=CONFIG_ARCH_HAS_TICK_BROADCAST
+--cfg=CONFIG_ARCH_HAS_TICK_BROADCAST="y"
+--cfg=CONFIG_BINARY_PRINTF
+--cfg=CONFIG_BINARY_PRINTF="y"
+--cfg=CONFIG_ZSTD_DECOMPRESS
+--cfg=CONFIG_ZSTD_DECOMPRESS="y"
+--cfg=CONFIG_WLAN_VENDOR_QUANTENNA
+--cfg=CONFIG_WLAN_VENDOR_QUANTENNA="y"
+--cfg=CONFIG_SND_PCM_TIMER
+--cfg=CONFIG_SND_PCM_TIMER="y"
+--cfg=CONFIG_ARCH_HAS_SET_DIRECT_MAP
+--cfg=CONFIG_ARCH_HAS_SET_DIRECT_MAP="y"
+--cfg=CONFIG_SYSVIPC_SYSCTL
+--cfg=CONFIG_SYSVIPC_SYSCTL="y"
+--cfg=CONFIG_WLAN_VENDOR_TI
+--cfg=CONFIG_WLAN_VENDOR_TI="y"
+--cfg=CONFIG_DMA_SHARED_BUFFER
+--cfg=CONFIG_DMA_SHARED_BUFFER="y"
+--cfg=CONFIG_RTC_SYSTOHC
+--cfg=CONFIG_RTC_SYSTOHC="y"
+--cfg=CONFIG_OF_ADDRESS
+--cfg=CONFIG_OF_ADDRESS="y"
+--cfg=CONFIG_DECOMPRESS_GZIP
+--cfg=CONFIG_DECOMPRESS_GZIP="y"
+--cfg=CONFIG_VIRTIO_MENU
+--cfg=CONFIG_VIRTIO_MENU="y"
+--cfg=CONFIG_VIRTIO_BLK
+--cfg=CONFIG_VIRTIO_BLK="y"
+--cfg=CONFIG_DECOMPRESS_LZO
+--cfg=CONFIG_DECOMPRESS_LZO="y"
+--cfg=CONFIG_CRYPTO_XXHASH
+--cfg=CONFIG_CRYPTO_XXHASH="m"
+--cfg=CONFIG_64BIT
+--cfg=CONFIG_64BIT="y"
+--cfg=CONFIG_MMC_TMIO_CORE
+--cfg=CONFIG_MMC_TMIO_CORE="y"
+--cfg=CONFIG_MMC_SDHCI_PLTFM
+--cfg=CONFIG_MMC_SDHCI_PLTFM="y"
+--cfg=CONFIG_I2C_CHARDEV
+--cfg=CONFIG_I2C_CHARDEV="m"
+--cfg=CONFIG_LLC
+--cfg=CONFIG_LLC="m"
+--cfg=CONFIG_ARCH_USE_QUEUED_RWLOCKS
+--cfg=CONFIG_ARCH_USE_QUEUED_RWLOCKS="y"
+--cfg=CONFIG_ARCH_KEEP_MEMBLOCK
+--cfg=CONFIG_ARCH_KEEP_MEMBLOCK="y"
+--cfg=CONFIG_REGMAP_MMIO
+--cfg=CONFIG_REGMAP_MMIO="y"
+--cfg=CONFIG_NETWORK_SECMARK
+--cfg=CONFIG_NETWORK_SECMARK="y"
+--cfg=CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+--cfg=CONFIG_GENERIC_CLOCKEVENTS_BROADCAST="y"
+--cfg=CONFIG_POWER_SUPPLY_HWMON
+--cfg=CONFIG_POWER_SUPPLY_HWMON="y"
+--cfg=CONFIG_SERIAL_8250_PCI
+--cfg=CONFIG_SERIAL_8250_PCI="y"
+--cfg=CONFIG_MOUSE_PS2_SYNAPTICS
+--cfg=CONFIG_MOUSE_PS2_SYNAPTICS="y"
+--cfg=CONFIG_ATA_BMDMA
+--cfg=CONFIG_ATA_BMDMA="y"
+--cfg=CONFIG_XZ_DEC_ARMTHUMB
+--cfg=CONFIG_XZ_DEC_ARMTHUMB="y"
+--cfg=CONFIG_NFS_V4_1
+--cfg=CONFIG_NFS_V4_1="y"
+--cfg=CONFIG_ARCH_WANT_FRAME_POINTERS
+--cfg=CONFIG_ARCH_WANT_FRAME_POINTERS="y"
+--cfg=CONFIG_REGMAP
+--cfg=CONFIG_REGMAP="y"
+--cfg=CONFIG_FB_IOMEM_HELPERS
+--cfg=CONFIG_FB_IOMEM_HELPERS="y"
+--cfg=CONFIG_PCIE_PME
+--cfg=CONFIG_PCIE_PME="y"
+--cfg=CONFIG_HAVE_MOD_ARCH_SPECIFIC
+--cfg=CONFIG_HAVE_MOD_ARCH_SPECIFIC="y"
+--cfg=CONFIG_ERRATA_SIFIVE
+--cfg=CONFIG_ERRATA_SIFIVE="y"
+--cfg=CONFIG_FB_NOTIFY
+--cfg=CONFIG_FB_NOTIFY="y"
+--cfg=CONFIG_CAN_DEV
+--cfg=CONFIG_CAN_DEV="m"
+--cfg=CONFIG_STRICT_MODULE_RWX
+--cfg=CONFIG_STRICT_MODULE_RWX="y"
+--cfg=CONFIG_ERRATA_SIFIVE_CIP_1200
+--cfg=CONFIG_ERRATA_SIFIVE_CIP_1200="y"
+--cfg=CONFIG_SYSCTL_EXCEPTION_TRACE
+--cfg=CONFIG_SYSCTL_EXCEPTION_TRACE="y"
+--cfg=CONFIG_SYSVIPC_COMPAT
+--cfg=CONFIG_SYSVIPC_COMPAT="y"
+--cfg=CONFIG_FHANDLE
+--cfg=CONFIG_FHANDLE="y"
+--cfg=CONFIG_WATCHDOG_OPEN_TIMEOUT="0"
+--cfg=CONFIG_CRYPTO_LIB_SHA256
+--cfg=CONFIG_CRYPTO_LIB_SHA256="m"
+--cfg=CONFIG_SWAP
+--cfg=CONFIG_SWAP="y"
+--cfg=CONFIG_FW_CACHE
+--cfg=CONFIG_FW_CACHE="y"
+--cfg=CONFIG_RESET_POLARFIRE_SOC
+--cfg=CONFIG_RESET_POLARFIRE_SOC="y"
+--cfg=CONFIG_STACKPROTECTOR_PER_TASK
+--cfg=CONFIG_STACKPROTECTOR_PER_TASK="y"
+--cfg=CONFIG_CRC_CCITT
+--cfg=CONFIG_CRC_CCITT="m"
+--cfg=CONFIG_IPVLAN_L3S
+--cfg=CONFIG_IPVLAN_L3S="y"
+--cfg=CONFIG_NET_VENDOR_CAVIUM
+--cfg=CONFIG_NET_VENDOR_CAVIUM="y"
+--cfg=CONFIG_GPIOLIB_IRQCHIP
+--cfg=CONFIG_GPIOLIB_IRQCHIP="y"
+--cfg=CONFIG_BPF_UNPRIV_DEFAULT_OFF
+--cfg=CONFIG_BPF_UNPRIV_DEFAULT_OFF="y"
+--cfg=CONFIG_BLK_DEV_SD
+--cfg=CONFIG_BLK_DEV_SD="y"
+--cfg=CONFIG_MODULE_UNLOAD
+--cfg=CONFIG_MODULE_UNLOAD="y"
+--cfg=CONFIG_PREEMPT_COUNT
+--cfg=CONFIG_PREEMPT_COUNT="y"
+--cfg=CONFIG_NET_VENDOR_ENGLEDER
+--cfg=CONFIG_NET_VENDOR_ENGLEDER="y"
+--cfg=CONFIG_RWSEM_SPIN_ON_OWNER
+--cfg=CONFIG_RWSEM_SPIN_ON_OWNER="y"
+--cfg=CONFIG_CLK_STARFIVE_JH7110_AON
+--cfg=CONFIG_CLK_STARFIVE_JH7110_AON="m"
+--cfg=CONFIG_GENERIC_PINMUX_FUNCTIONS
+--cfg=CONFIG_GENERIC_PINMUX_FUNCTIONS="y"
+--cfg=CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+--cfg=CONFIG_CC_HAS_ASM_GOTO_OUTPUT="y"
+--cfg=CONFIG_BITREVERSE
+--cfg=CONFIG_BITREVERSE="y"
+--cfg=CONFIG_DEVPORT
+--cfg=CONFIG_DEVPORT="y"
+--cfg=CONFIG_IOSCHED_BFQ
+--cfg=CONFIG_IOSCHED_BFQ="y"
+--cfg=CONFIG_PNP_DEBUG_MESSAGES
+--cfg=CONFIG_PNP_DEBUG_MESSAGES="y"
+--cfg=CONFIG_NF_CONNTRACK
+--cfg=CONFIG_NF_CONNTRACK="m"
+--cfg=CONFIG_EFI_RUNTIME_WRAPPERS
+--cfg=CONFIG_EFI_RUNTIME_WRAPPERS="y"
+--cfg=CONFIG_MDIO_DEVRES
+--cfg=CONFIG_MDIO_DEVRES="y"
+--cfg=CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,bpf"
+--cfg=CONFIG_ARCH_DMA_ADDR_T_64BIT
+--cfg=CONFIG_ARCH_DMA_ADDR_T_64BIT="y"
+--cfg=CONFIG_FILE_LOCKING
+--cfg=CONFIG_FILE_LOCKING="y"
+--cfg=CONFIG_SND_SOC_I2C_AND_SPI
+--cfg=CONFIG_SND_SOC_I2C_AND_SPI="m"
+--cfg=CONFIG_CAN_RCAR_CANFD
+--cfg=CONFIG_CAN_RCAR_CANFD="m"
+--cfg=CONFIG_AIO
+--cfg=CONFIG_AIO="y"
+--cfg=CONFIG_OF
+--cfg=CONFIG_OF="y"
+--cfg=CONFIG_PERF_EVENTS
+--cfg=CONFIG_PERF_EVENTS="y"
+--cfg=CONFIG_GENERIC_TIME_VSYSCALL
+--cfg=CONFIG_GENERIC_TIME_VSYSCALL="y"
+--cfg=CONFIG_IP_NF_TARGET_REJECT
+--cfg=CONFIG_IP_NF_TARGET_REJECT="m"
+--cfg=CONFIG_HAVE_MOVE_PMD
+--cfg=CONFIG_HAVE_MOVE_PMD="y"
+--cfg=CONFIG_KALLSYMS_BASE_RELATIVE
+--cfg=CONFIG_KALLSYMS_BASE_RELATIVE="y"
+--cfg=CONFIG_IP_VS_TAB_BITS="12"
+--cfg=CONFIG_RTC_INTF_DEV
+--cfg=CONFIG_RTC_INTF_DEV="y"
+--cfg=CONFIG_SPI_SIFIVE
+--cfg=CONFIG_SPI_SIFIVE="y"
+--cfg=CONFIG_MTD_MAP_BANK_WIDTH_4
+--cfg=CONFIG_MTD_MAP_BANK_WIDTH_4="y"
+--cfg=CONFIG_HID_SUPPORT
+--cfg=CONFIG_HID_SUPPORT="y"
+--cfg=CONFIG_DEBUG_PAGEALLOC
+--cfg=CONFIG_DEBUG_PAGEALLOC="y"
+--cfg=CONFIG_MESSAGE_LOGLEVEL_DEFAULT="4"
+--cfg=CONFIG_LOCKUP_DETECTOR
+--cfg=CONFIG_LOCKUP_DETECTOR="y"
+--cfg=CONFIG_IP_VS_PROTO_UDP
+--cfg=CONFIG_IP_VS_PROTO_UDP="y"
+--cfg=CONFIG_NLS_DEFAULT="iso8859-1"
+--cfg=CONFIG_UTS_NS
+--cfg=CONFIG_UTS_NS="y"
+--cfg=CONFIG_VIDEO_NOMODESET
+--cfg=CONFIG_VIDEO_NOMODESET="y"
+--cfg=CONFIG_PAGE_REPORTING
+--cfg=CONFIG_PAGE_REPORTING="y"
+--cfg=CONFIG_DMA_ENGINE
+--cfg=CONFIG_DMA_ENGINE="y"
+--cfg=CONFIG_CGROUP_PIDS
+--cfg=CONFIG_CGROUP_PIDS="y"
+--cfg=CONFIG_CRYPTO_AEAD2
+--cfg=CONFIG_CRYPTO_AEAD2="y"
+--cfg=CONFIG_MOUSE_PS2
+--cfg=CONFIG_MOUSE_PS2="y"
+--cfg=CONFIG_IP_VS_SH_TAB_BITS="8"
+--cfg=CONFIG_CRYPTO_ALGAPI2
+--cfg=CONFIG_CRYPTO_ALGAPI2="y"
+--cfg=CONFIG_INPUT
+--cfg=CONFIG_INPUT="y"
+--cfg=CONFIG_PROC_SYSCTL
+--cfg=CONFIG_PROC_SYSCTL="y"
+--cfg=CONFIG_FWNODE_MDIO
+--cfg=CONFIG_FWNODE_MDIO="y"
+--cfg=CONFIG_RD_LZ4
+--cfg=CONFIG_RD_LZ4="y"
+--cfg=CONFIG_MMU
+--cfg=CONFIG_MMU="y"
+--cfg=CONFIG_LD_VERSION="24200"
+--cfg=CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY
+--cfg=CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY="y"
diff --git a/riscv/include/generated/timeconst.h b/riscv/include/generated/timeconst.h
new file mode 100644
index 0000000..78cc14a
--- /dev/null
+++ b/riscv/include/generated/timeconst.h
@@ -0,0 +1,40 @@
+/* Automatically generated by kernel/time/timeconst.bc */
+/* Time conversion constants for HZ == 250 */
+
+#ifndef KERNEL_TIMECONST_H
+#define KERNEL_TIMECONST_H
+
+#include <linux/param.h>
+#include <linux/types.h>
+
+#if HZ != 250
+#error "include/generated/timeconst.h has the wrong HZ value!"
+#endif
+
+#define HZ_TO_MSEC_MUL32 U64_C(0x80000000)
+#define HZ_TO_MSEC_ADJ32 U64_C(0x0)
+#define HZ_TO_MSEC_SHR32 29
+#define MSEC_TO_HZ_MUL32 U64_C(0x80000000)
+#define MSEC_TO_HZ_ADJ32 U64_C(0x180000000)
+#define MSEC_TO_HZ_SHR32 33
+#define HZ_TO_MSEC_NUM 4
+#define HZ_TO_MSEC_DEN 1
+#define MSEC_TO_HZ_NUM 1
+#define MSEC_TO_HZ_DEN 4
+
+#define HZ_TO_USEC_MUL32 U64_C(0xFA000000)
+#define HZ_TO_USEC_ADJ32 U64_C(0x0)
+#define HZ_TO_USEC_SHR32 20
+#define USEC_TO_HZ_MUL32 U64_C(0x83126E98)
+#define USEC_TO_HZ_ADJ32 U64_C(0x7FF7CED9168)
+#define USEC_TO_HZ_SHR32 43
+#define HZ_TO_USEC_NUM 4000
+#define HZ_TO_USEC_DEN 1
+#define USEC_TO_HZ_NUM 1
+#define USEC_TO_HZ_DEN 4000
+#define HZ_TO_NSEC_NUM 4000000
+#define HZ_TO_NSEC_DEN 1
+#define NSEC_TO_HZ_NUM 1
+#define NSEC_TO_HZ_DEN 4000000
+
+#endif /* KERNEL_TIMECONST_H */
diff --git a/riscv/include/generated/uapi/linux/version.h b/riscv/include/generated/uapi/linux/version.h
new file mode 100644
index 0000000..a79fa38
--- /dev/null
+++ b/riscv/include/generated/uapi/linux/version.h
@@ -0,0 +1,5 @@
+#define LINUX_VERSION_CODE 395011
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
+#define LINUX_VERSION_MAJOR 6
+#define LINUX_VERSION_PATCHLEVEL 7
+#define LINUX_VERSION_SUBLEVEL 3
diff --git a/riscv/include/generated/utsrelease.h b/riscv/include/generated/utsrelease.h
new file mode 100644
index 0000000..0468144
--- /dev/null
+++ b/riscv/include/generated/utsrelease.h
@@ -0,0 +1 @@
+#define UTS_RELEASE "6.7.3"
diff --git a/riscv/include/generated/vdso-offsets.h b/riscv/include/generated/vdso-offsets.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/riscv/include/generated/vdso-offsets.h
diff --git a/riscv/include/mach/riscv/boolean.h b/riscv/include/mach/riscv/boolean.h
new file mode 100644
index 0000000..dcb513c
--- /dev/null
+++ b/riscv/include/mach/riscv/boolean.h
@@ -0,0 +1,37 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: boolean.h
+ *
+ * Boolean type, for RISCV.
+ */
+
+#ifndef _MACH_RISCV_BOOLEAN_H_
+#define _MACH_RISCV_BOOLEAN_H_
+
+typedef int boolean_t;
+
+#endif /* _MACH_RISCV_BOOLEAN_H_ */
diff --git a/riscv/include/mach/riscv/eflags.h b/riscv/include/mach/riscv/eflags.h
new file mode 100644
index 0000000..f195074
--- /dev/null
+++ b/riscv/include/mach/riscv/eflags.h
@@ -0,0 +1,53 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_RISCV_EFLAGS_H_
+#define _MACH_RISCV_EFLAGS_H_
+
+/*
+ * riscv flags register
+ */
+#define EFL_CF 0x00000001 /* carry */
+#define EFL_PF 0x00000004 /* parity of low 8 bits */
+#define EFL_AF 0x00000010 /* carry out of bit 3 */
+#define EFL_ZF 0x00000040 /* zero */
+#define EFL_SF 0x00000080 /* sign */
+#define EFL_TF 0x00000100 /* trace trap */
+#define EFL_IF 0x00000200 /* interrupt enable */
+#define EFL_DF 0x00000400 /* direction */
+#define EFL_OF 0x00000800 /* overflow */
+#define EFL_IOPL 0x00003000 /* IO privilege level: */
+#define EFL_IOPL_KERNEL 0x00000000 /* kernel */
+#define EFL_IOPL_USER 0x00003000 /* user */
+#define EFL_NT 0x00004000 /* nested task */
+#define EFL_RF 0x00010000 /* resume without tracing */
+#define EFL_VM 0x00020000 /* virtual 8086 mode */
+#define EFL_AC 0x00040000 /* alignment check */
+#define EFL_VI 0x00080000 /* virtual interrupt */
+#define EFL_VIP 0x00100000 /* virtual interrupt pending */
+#define EFL_ID 0x00200000 /* cpuid available */
+
+#endif /* _MACH_RISCV_EFLAGS_H_ */
diff --git a/riscv/include/mach/riscv/exec/elf.h b/riscv/include/mach/riscv/exec/elf.h
new file mode 100644
index 0000000..badeb01
--- /dev/null
+++ b/riscv/include/mach/riscv/exec/elf.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_RISCV_EXEC_ELF_H_
+#define _MACH_RISCV_EXEC_ELF_H_
+
+typedef unsigned int Elf32_Addr;
+typedef unsigned short Elf32_Half;
+typedef unsigned int Elf32_Off;
+typedef signed int Elf32_Sword;
+typedef unsigned int Elf32_Word;
+
+typedef uint64_t Elf64_Addr;
+typedef uint64_t Elf64_Off;
+typedef int32_t Elf64_Shalf;
+typedef int32_t Elf64_Sword;
+typedef uint32_t Elf64_Word;
+typedef int64_t Elf64_Sxword;
+typedef uint64_t Elf64_Xword;
+typedef uint16_t Elf64_Half;
+
+
+/* Architecture identification parameters for RISCV. */
+#if defined(__riscv_xlen) && __riscv_xlen == 64 && ! defined(USER32)
+#define MY_ELF_CLASS ELFCLASS64
+#define MY_EI_DATA ELFDATA2LSB
+//#define MY_E_MACHINE EM_RISCV64
+#define MY_E_MACHINE EM_X86_64
+#else
+#define MY_ELF_CLASS ELFCLASS32
+#define MY_EI_DATA ELFDATA2LSB
+//#define MY_E_MACHINE EM_RISCV32
+#define MY_E_MACHINE EM_386
+#endif
+
+#endif /* _MACH_RISCV_EXEC_ELF_H_ */
diff --git a/riscv/include/mach/riscv/fp_reg.h b/riscv/include/mach/riscv/fp_reg.h
new file mode 100644
index 0000000..5f3ccd0
--- /dev/null
+++ b/riscv/include/mach/riscv/fp_reg.h
@@ -0,0 +1,140 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992-1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_RISCV_FP_REG_H_
+#define _MACH_RISCV_FP_REG_H_
+
+/*
+ * Floating point registers and status, as saved
+ * and restored by FP save/restore instructions.
+ */
+struct i386_fp_save {
+ unsigned short fp_control; /* control */
+ unsigned short fp_unused_1;
+ unsigned short fp_status; /* status */
+ unsigned short fp_unused_2;
+ unsigned short fp_tag; /* register tags */
+ unsigned short fp_unused_3;
+ unsigned int fp_eip; /* eip at failed instruction */
+ unsigned short fp_cs; /* cs at failed instruction */
+ unsigned short fp_opcode; /* opcode of failed instruction */
+ unsigned int fp_dp; /* data address */
+ unsigned short fp_ds; /* data segment */
+ unsigned short fp_unused_4;
+};
+
+struct i386_fp_regs {
+ unsigned short fp_reg_word[8][5];
+ /* space for 8 80-bit FP registers */
+};
+
+#define XSAVE_XCOMP_BV_COMPACT (((unsigned long long)1) << 63)
+struct i386_xfp_xstate_header {
+ unsigned long long xfp_features;
+ unsigned long long xcomp_bv;
+ unsigned long long reserved[6];
+} __attribute__((packed, aligned(64)));
+
+struct i386_xfp_save {
+ unsigned short fp_control; /* control */
+ unsigned short fp_status; /* status */
+ unsigned short fp_tag; /* register tags */
+ unsigned short fp_opcode; /* opcode of failed instruction */
+ unsigned int fp_eip; /* eip at failed instruction */
+ unsigned short fp_cs; /* cs at failed instruction / eip high */
+ unsigned short fp_eip3; /* eip higher */
+ unsigned int fp_dp; /* data address */
+ unsigned short fp_ds; /* data segment / dp high */
+ unsigned short fp_dp3; /* dp higher */
+ unsigned int fp_mxcsr; /* MXCSR */
+ unsigned int fp_mxcsr_mask; /* MXCSR_MASK */
+ unsigned char fp_reg_word[8][16];
+ /* space for 8 128-bit FP registers */
+ unsigned char fp_xreg_word[16][16];
+ /* space for 16 128-bit XMM registers */
+ unsigned int padding[24];
+ struct i386_xfp_xstate_header header;
+ unsigned char extended[0]; /* Extended region */
+} __attribute__((packed, aligned(64)));
+
+/*
+ * Control register
+ */
+#define FPC_IE 0x0001 /* enable invalid operation
+ exception */
+#define FPC_IM FPC_IE
+#define FPC_DE 0x0002 /* enable denormalized operation
+ exception */
+#define FPC_DM FPC_DE
+#define FPC_ZE 0x0004 /* enable zero-divide exception */
+#define FPC_ZM FPC_ZE
+#define FPC_OE 0x0008 /* enable overflow exception */
+#define FPC_OM FPC_OE
+#define FPC_UE 0x0010 /* enable underflow exception */
+#define FPC_PE 0x0020 /* enable precision exception */
+#define FPC_PC 0x0300 /* precision control: */
+#define FPC_PC_24 0x0000 /* 24 bits */
+#define FPC_PC_53 0x0200 /* 53 bits */
+#define FPC_PC_64 0x0300 /* 64 bits */
+#define FPC_RC 0x0c00 /* rounding control: */
+#define FPC_RC_RN 0x0000 /* round to nearest or even */
+#define FPC_RC_RD 0x0400 /* round down */
+#define FPC_RC_RU 0x0800 /* round up */
+#define FPC_RC_CHOP 0x0c00 /* chop */
+#define FPC_IC 0x1000 /* infinity control (obsolete) */
+#define FPC_IC_PROJ 0x0000 /* projective infinity */
+#define FPC_IC_AFF 0x1000 /* affine infinity (std) */
+
+/*
+ * Status register
+ */
+#define FPS_IE 0x0001 /* invalid operation */
+#define FPS_DE 0x0002 /* denormalized operand */
+#define FPS_ZE 0x0004 /* divide by zero */
+#define FPS_OE 0x0008 /* overflow */
+#define FPS_UE 0x0010 /* underflow */
+#define FPS_PE 0x0020 /* precision */
+#define FPS_SF 0x0040 /* stack flag */
+#define FPS_ES 0x0080 /* error summary */
+#define FPS_C0 0x0100 /* condition code bit 0 */
+#define FPS_C1 0x0200 /* condition code bit 1 */
+#define FPS_C2 0x0400 /* condition code bit 2 */
+#define FPS_TOS 0x3800 /* top-of-stack pointer */
+#define FPS_TOS_SHIFT 11
+#define FPS_C3 0x4000 /* condition code bit 3 */
+#define FPS_BUSY 0x8000 /* FPU busy */
+
+/*
+ * Kind of floating-point support provided by kernel.
+ */
+#define FP_NO 0 /* no floating point */
+#define FP_SOFT 1 /* software FP emulator */
+#define FP_287 2 /* 80287 */
+#define FP_387 3 /* 80387 or 80486 */
+#define FP_387FX 4 /* FXSAVE/RSTOR-capable */
+#define FP_387X 5 /* XSAVE/RSTOR-capable */
+
+#endif /* _MACH_RISCV_FP_REG_H_ */
diff --git a/riscv/include/mach/riscv/kern_return.h b/riscv/include/mach/riscv/kern_return.h
new file mode 100644
index 0000000..9bcd077
--- /dev/null
+++ b/riscv/include/mach/riscv/kern_return.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: kern_return.h
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Machine-dependent kernel return definitions.
+ */
+
+#ifndef _MACH_RISCV_KERN_RETURN_H_
+#define _MACH_RISCV_KERN_RETURN_H_
+
+#ifndef __ASSEMBLER__
+typedef int kern_return_t;
+#endif /* __ASSEMBLER__ */
+#endif /* _MACH_RISCV_KERN_RETURN_H_ */
diff --git a/riscv/include/mach/riscv/mach_riscv.defs b/riscv/include/mach/riscv/mach_riscv.defs
new file mode 100644
index 0000000..c6275ff
--- /dev/null
+++ b/riscv/include/mach/riscv/mach_riscv.defs
@@ -0,0 +1,113 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Special functions for i386.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+ mach_i386 3800;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+#ifdef MACH_I386_IMPORTS
+MACH_I386_IMPORTS
+#endif
+
+type descriptor_t = struct[2] of uint32_t;
+type descriptor_list_t = array[*] of descriptor_t;
+
+import <mach/machine/mach_riscv_types.h>;
+
+#if KERNEL_SERVER
+simport <machine/io_perm.h>;
+#endif
+
+type io_port_t = MACH_MSG_TYPE_INTEGER_16;
+type io_perm_t = mach_port_t
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ intran: io_perm_t convert_port_to_io_perm(mach_port_t)
+ outtran: mach_port_t convert_io_perm_to_port(io_perm_t)
+ destructor: io_perm_deallocate(io_perm_t)
+#endif /* KERNEL_SERVER */
+ ;
+
+skip; /* i386_io_port_add */
+skip; /* i386_io_port_remove */
+skip; /* i386_io_port_list */
+
+routine i386_set_ldt(
+ target_thread : thread_t;
+ first_selector : int;
+ desc_list : descriptor_list_t, serverCopy);
+
+routine i386_get_ldt(
+ target_thread : thread_t;
+ first_selector : int;
+ selector_count : int;
+ out desc_list : descriptor_list_t);
+
+/* Request a new port IO_PERM that represents the capability to access
+ the I/O ports [FROM; TO] directly. MASTER_PORT is the master device port.
+
+ The function returns KERN_INVALID_ARGUMENT if TARGET_TASK is not a task,
+ or FROM is greater than TO. */
+routine i386_io_perm_create(
+ master_port : mach_port_t;
+ from : io_port_t;
+ to : io_port_t;
+ out io_perm : io_perm_t);
+
+/* Modify the I/O permissions for TARGET_TASK. If ENABLE is TRUE, the
+ permission to access the I/O ports specified by IO_PERM is granted,
+ otherwise it is withdrawn.
+
+ The function returns KERN_INVALID_ARGUMENT if TARGET_TASK is not a valid
+ task or IO_PERM not a valid I/O permission port. */
+routine i386_io_perm_modify(
+ target_task : task_t;
+ io_perm : io_perm_t;
+ enable : boolean_t);
+
+/* Modify one of a few available thread-specific segment descriptor slots.
+ The SELECTOR must be a value from a previous call (on any thread),
+ or -1 to allocate an available slot and return the segment selector for it.
+ These slots are copied into the CPU on each thread switch.
+ Returns KERN_NO_SPACE when there are no more slots available. */
+routine i386_set_gdt(
+ target_thread : thread_t;
+ inout selector : int;
+ desc : descriptor_t);
+
+/* Fetch a segment descriptor set with a prior i386_set_gdt call. */
+routine i386_get_gdt(
+ target_thread : thread_t;
+ selector : int;
+ out desc : descriptor_t);
diff --git a/riscv/include/mach/riscv/mach_riscv_types.h b/riscv/include/mach/riscv/mach_riscv_types.h
new file mode 100644
index 0000000..f5177fb
--- /dev/null
+++ b/riscv/include/mach/riscv/mach_riscv_types.h
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Type definitions for i386 interface routines.
+ */
+
+#ifndef _MACH_MACH_I386_TYPES_H_
+#define _MACH_MACH_I386_TYPES_H_
+
+#ifndef __ASSEMBLER__
+/*
+ * i386 segment descriptor.
+ */
+struct descriptor {
+ unsigned int low_word;
+ unsigned int high_word;
+};
+
+typedef struct descriptor descriptor_t;
+typedef struct descriptor *descriptor_list_t;
+typedef const struct descriptor *const_descriptor_list_t;
+
+#endif /* !__ASSEMBLER__ */
+
+/*
+ * i386 I/O port
+ */
+
+#ifndef MACH_KERNEL
+typedef unsigned short io_port_t;
+typedef mach_port_t io_perm_t;
+#endif /* !MACH_KERNEL */
+
+#endif /* _MACH_MACH_I386_TYPES_H_ */
diff --git a/riscv/include/mach/riscv/machine_types.defs b/riscv/include/mach/riscv/machine_types.defs
new file mode 100755
index 0000000..df719b4
--- /dev/null
+++ b/riscv/include/mach/riscv/machine_types.defs
@@ -0,0 +1,107 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: mach/machine/machine_types.defs
+ * Author: Alessandro Forin
+ * Date: 7/92
+ *
+ * Header file for the basic, machine-dependent data types.
+ * Version for 32 bit architectures.
+ *
+ */
+
+#ifndef _MACHINE_MACHINE_TYPES_DEFS_
+#define _MACHINE_MACHINE_TYPES_DEFS_ 1
+
+/*
+ * A natural_t is the type for the native
+ * unsigned integer type, usually 32 bits. It is suitable for
+ * most counters with a small chance of overflow.
+ * While historically natural_t was meant to be the same
+ * as a pointer, that is not the case here.
+ */
+type natural_t = uint32_t;
+
+/*
+ * An integer_t is the signed counterpart
+ * of the natural_t type. Both types are
+ * only supposed to be used to define
+ * other types in a machine-independent
+ * way.
+ */
+type integer_t = int32_t;
+
+/*
+ * long_natural_t and long_integer_t for kernel <-> userland interfaces as the
+ * size depends on the architecture of both kernel and userland.
+ */
+#if defined(KERNEL_SERVER) && defined(USER32)
+type rpc_long_natural_t = uint32_t;
+type rpc_long_integer_t = int32_t;
+#else /* KERNEL and USER32 */
+#if defined(__riscv_xlen) && __riscv_xlen == 64
+type rpc_long_natural_t = uint64_t;
+type rpc_long_integer_t = int64_t;
+#else
+type rpc_long_natural_t = uint32_t;
+type rpc_long_integer_t = int32_t;
+#endif /* __riscv_xlen */
+#endif /* KERNEL_SERVER and USER32 */
+
+/*
+ * A long_natural_t is a possibly larger unsigned integer type than natural_t.
+ * Should be used instead of natural_t when we want the data to be less subject
+ * to overflows.
+ */
+type long_natural_t = rpc_long_natural_t
+#if defined(KERNEL_SERVER)
+ intran: long_natural_t convert_long_natural_from_user(rpc_long_natural_t)
+ outtran: rpc_long_natural_t convert_long_natural_to_user(long_natural_t)
+#elif defined(KERNEL_USER)
+ ctype: rpc_long_natural_t
+#endif
+ ;
+
+/*
+ * Larger version of integer_t. Only used when we want to hold possibly larger
+ * values than what is possible with integer_t.
+ */
+type long_integer_t = rpc_long_integer_t
+#if defined(KERNEL_SERVER)
+ intran: long_integer_t convert_long_integer_from_user(rpc_long_integer_t)
+ outtran: rpc_long_integer_t convert_long_integer_to_user(long_integer_t)
+#elif defined(KERNEL_USER)
+ ctype: rpc_long_integer_t
+#endif
+ ;
+
+/*
+ * Physical address size
+ */
+type rpc_phys_addr_t = uint64_t;
+type rpc_phys_addr_array_t = array[] of rpc_phys_addr_t;
+
+#endif /* _MACHINE_MACHINE_TYPES_DEFS_ */
diff --git a/riscv/include/mach/riscv/multiboot.h b/riscv/include/mach/riscv/multiboot.h
new file mode 100644
index 0000000..c3538c1
--- /dev/null
+++ b/riscv/include/mach/riscv/multiboot.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 1995-1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _MACH_I386_MULTIBOOT_H_
+#define _MACH_I386_MULTIBOOT_H_
+
+#include <mach/machine/vm_types.h>
+
+/* The entire multiboot_header must be contained
+ within the first MULTIBOOT_SEARCH bytes of the kernel image. */
+#define MULTIBOOT_SEARCH 8192
+
+/* Magic value identifying the multiboot_header. */
+#define MULTIBOOT_MAGIC 0x1badb002
+
+/* Features flags for 'flags'.
+ If a boot loader sees a flag in MULTIBOOT_MUSTKNOW set
+ and it doesn't understand it, it must fail. */
+#define MULTIBOOT_MUSTKNOW 0x0000ffff
+
+/* Align all boot modules on page (4KB) boundaries. */
+#define MULTIBOOT_PAGE_ALIGN 0x00000001
+
+/* Must be provided memory information in multiboot_raw_info structure */
+#define MULTIBOOT_MEMORY_INFO 0x00000002
+
+/* Use the load address fields above instead of the ones in the a.out header
+ to figure out what to load where, and what to do afterwards.
+ This should only be needed for a.out kernel images
+ (ELF and other formats can generally provide the needed information). */
+#define MULTIBOOT_AOUT_KLUDGE 0x00010000
+
+/* The boot loader passes this value in register EAX to signal the kernel
+ that the multiboot method is being used */
+#define MULTIBOOT_VALID 0x2badb002
+
+
+
+#define MULTIBOOT_MEMORY 0x00000001
+#define MULTIBOOT_BOOT_DEVICE 0x00000002
+#define MULTIBOOT_CMDLINE 0x00000004
+#define MULTIBOOT_MODS 0x00000008
+#define MULTIBOOT_AOUT_SYMS 0x00000010
+#define MULTIBOOT_ELF_SHDR 0x00000020
+#define MULTIBOOT_MEM_MAP 0x00000040
+
+
+/* The mods_addr field above contains the physical address of the first
+ of 'mods_count' multiboot_module structures. */
+struct multiboot_module
+{
+ /* Physical start and end addresses of the module data itself. */
+ vm_offset_t mod_start;
+ vm_offset_t mod_end;
+
+ /* Arbitrary ASCII string associated with the module. */
+ vm_offset_t string;
+
+ /* Boot loader must set to 0; OS must ignore. */
+ unsigned reserved;
+};
+
+#ifdef __x86_64__
+/* The mods_addr field above contains the physical address of the first
+ of 'mods_count' multiboot_module structures. */
+struct multiboot32_module
+{
+ /* Physical start and end addresses of the module data itself. */
+ unsigned mod_start;
+ unsigned mod_end;
+
+ /* Arbitrary ASCII string associated with the module. */
+ unsigned string;
+
+ /* Boot loader must set to 0; OS must ignore. */
+ unsigned reserved;
+};
+#endif
+
+/* usable memory "Type", all others are reserved. */
+#define MB_ARD_MEMORY 1
+
+/*
+ * Copyright (c) 2010, 2012 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Versions used by the biosmem module.
+ */
+
+#include <kern/macros.h>
+
+/*
+ * Magic number provided by the OS to the boot loader.
+ */
+#define MULTIBOOT_OS_MAGIC 0x1badb002
+
+/*
+ * Multiboot flags requesting services from the boot loader.
+ */
+#define MULTIBOOT_OS_MEMORY_INFO 0x2
+
+#define MULTIBOOT_OS_FLAGS MULTIBOOT_OS_MEMORY_INFO
+
+/*
+ * Magic number to identify a multiboot compliant boot loader.
+ */
+#define MULTIBOOT_LOADER_MAGIC 0x2badb002
+
+/*
+ * Multiboot flags set by the boot loader.
+ */
+#define MULTIBOOT_LOADER_MEMORY 0x01
+#define MULTIBOOT_LOADER_CMDLINE 0x04
+#define MULTIBOOT_LOADER_MODULES 0x08
+#define MULTIBOOT_LOADER_SHDR 0x20
+#define MULTIBOOT_LOADER_MMAP 0x40
+
+/*
+ * A multiboot module.
+ */
+struct multiboot_raw_module {
+ uint32_t mod_start;
+ uint32_t mod_end;
+ uint32_t string;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * Memory map entry.
+ */
+struct multiboot_raw_mmap_entry {
+ uint32_t size;
+ uint64_t base_addr;
+ uint64_t length;
+ uint32_t type;
+} __packed;
+
+/*
+ * Multiboot information structure as passed by the boot loader.
+ */
+struct multiboot_raw_info {
+ uint32_t flags;
+ uint32_t mem_lower;
+ uint32_t mem_upper;
+ uint32_t unused0;
+ uint32_t cmdline;
+ uint32_t mods_count;
+ uint32_t mods_addr;
+ uint32_t shdr_num;
+ uint32_t shdr_size;
+ uint32_t shdr_addr;
+ uint32_t shdr_strndx;
+ uint32_t mmap_length;
+ uint32_t mmap_addr;
+ uint32_t unused1[9];
+} __packed;
+
+/*
+ * Versions of the multiboot structures suitable for use with 64-bit pointers.
+ */
+
+struct multiboot_os_module {
+ void *mod_start;
+ void *mod_end;
+ char *string;
+};
+
+struct multiboot_os_info {
+ uint32_t flags;
+ char *cmdline;
+ struct multiboot_module *mods_addr;
+ uint32_t mods_count;
+};
+
+#endif /* _MACH_I386_MULTIBOOT_H_ */
diff --git a/riscv/include/mach/riscv/thread_status.h b/riscv/include/mach/riscv/thread_status.h
new file mode 100644
index 0000000..894b022
--- /dev/null
+++ b/riscv/include/mach/riscv/thread_status.h
@@ -0,0 +1,190 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: thread_status.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * This file contains the structure definitions for the thread
+ * state as applied to RISCV processors.
+ */
+
+#ifndef _MACH_RISCV_THREAD_STATUS_H_
+#define _MACH_RISCV_THREAD_STATUS_H_
+
+#include <mach/machine/fp_reg.h>
+/*
+ * i386_thread_state this is the structure that is exported
+ * to user threads for use in status/mutate
+ * calls. This structure should never
+ * change.
+ *
+ * i386_float_state exported to use threads for access to
+ * floating point registers. Try not to
+ * change this one, either.
+ *
+ * i386_isa_port_map_state exported to user threads to allow
+ * selective in/out operations
+ *
+ */
+
+#define i386_THREAD_STATE 1
+#define i386_FLOAT_STATE 2
+#define i386_ISA_PORT_MAP_STATE 3
+#define i386_V86_ASSIST_STATE 4
+#define i386_REGS_SEGS_STATE 5
+#define i386_DEBUG_STATE 6
+#define i386_FSGS_BASE_STATE 7
+
+/*
+ * This structure is used for both
+ * i386_THREAD_STATE and i386_REGS_SEGS_STATE.
+ */
+struct i386_thread_state {
+#if defined(__x86_64__) && !defined(USER32)
+ uint64_t r8;
+ uint64_t r9;
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ uint64_t rdi;
+ uint64_t rsi;
+ uint64_t rbp;
+ uint64_t rsp;
+ uint64_t rbx;
+ uint64_t rdx;
+ uint64_t rcx;
+ uint64_t rax;
+ uint64_t rip;
+#else
+ unsigned int gs;
+ unsigned int fs;
+ unsigned int es;
+ unsigned int ds;
+
+ unsigned int edi;
+ unsigned int esi;
+ unsigned int ebp;
+ unsigned int esp;
+ unsigned int ebx;
+ unsigned int edx;
+ unsigned int ecx;
+ unsigned int eax;
+ unsigned int eip;
+#endif /* __x86_64__ && !USER32 */
+
+ unsigned int cs;
+#if defined(__x86_64__) && !defined(USER32)
+ uint64_t rfl;
+ uint64_t ursp;
+#else
+ unsigned int efl;
+ unsigned int uesp;
+#endif /* __x86_64__ and !USER32 */
+
+ unsigned int ss;
+};
+#define i386_THREAD_STATE_COUNT (sizeof (struct i386_thread_state)/sizeof(unsigned int))
+
+/*
+ * Floating point state.
+ *
+ * fpkind tells in what way floating point operations are supported.
+ * See the values for fp_kind in <mach/i386/fp_reg.h>.
+ *
+ * If the kind is FP_NO, then calls to set the state will fail, and
+ * thread_getstatus will return garbage for the rest of the state.
+ * If "initialized" is false, then the rest of the state is garbage.
+ * Clients can set "initialized" to false to force the coprocessor to
+ * be reset.
+ * "exc_status" is non-zero if the thread has noticed (but not
+ * proceeded from) a coprocessor exception. It contains the status
+ * word with the exception bits set. The status word in "fp_status"
+ * will have the exception bits turned off. If an exception bit in
+ * "fp_status" is turned on, then "exc_status" should be zero. This
+ * happens when the coprocessor exception is noticed after the system
+ * has context switched to some other thread.
+ *
+ * If kind is FP_387, then "state" is a i387_state. Other kinds might
+ * also use i387_state, but somebody will have to verify it (XXX).
+ * Note that the registers are ordered from top-of-stack down, not
+ * according to physical register number.
+ */
+
+#define FP_STATE_BYTES \
+ (sizeof (struct i386_fp_save) + sizeof (struct i386_fp_regs))
+
+struct i386_float_state {
+ int fpkind; /* FP_NO..FP_387X (readonly) */
+ int initialized;
+ unsigned char hw_state[FP_STATE_BYTES]; /* actual "hardware" state */
+ int exc_status; /* exception status (readonly) */
+};
+#define i386_FLOAT_STATE_COUNT (sizeof(struct i386_float_state)/sizeof(unsigned int))
+
+
+#define PORT_MAP_BITS 0x400
+struct i386_isa_port_map_state {
+ unsigned char pm[PORT_MAP_BITS>>3];
+};
+
+#define i386_ISA_PORT_MAP_STATE_COUNT (sizeof(struct i386_isa_port_map_state)/sizeof(unsigned int))
+
+/*
+ * V8086 assist supplies a pointer to an interrupt
+ * descriptor table in task space.
+ */
+struct i386_v86_assist_state {
+ unsigned int int_table; /* interrupt table address */
+ int int_count; /* interrupt table size */
+};
+
+struct v86_interrupt_table {
+ unsigned int count; /* count of pending interrupts */
+ unsigned short mask; /* ignore this interrupt if true */
+ unsigned short vec; /* vector to take */
+};
+
+#define i386_V86_ASSIST_STATE_COUNT \
+ (sizeof(struct i386_v86_assist_state)/sizeof(unsigned int))
+
+struct i386_debug_state {
+ unsigned int dr[8];
+};
+#define i386_DEBUG_STATE_COUNT \
+ (sizeof(struct i386_debug_state)/sizeof(unsigned int))
+
+struct i386_fsgs_base_state {
+ unsigned long fs_base;
+ unsigned long gs_base;
+};
+#define i386_FSGS_BASE_STATE_COUNT \
+ (sizeof(struct i386_fsgs_base_state)/sizeof(unsigned int))
+
+#endif /* _MACH_RISCV_THREAD_STATUS_H_ */
diff --git a/riscv/include/mach/riscv/trap.h b/riscv/include/mach/riscv/trap.h
new file mode 100644
index 0000000..6d83723
--- /dev/null
+++ b/riscv/include/mach/riscv/trap.h
@@ -0,0 +1,60 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _MACH_RISCV_TRAP_H_
+#define _MACH_RISCV_TRAP_H_
+
+/*
+ * Hardware trap vectors for i386.
+ */
+#define T_DIVIDE_ERROR 0
+#define T_DEBUG 1
+#define T_NMI 2 /* non-maskable interrupt */
+#define T_INT3 3 /* int 3 instruction */
+#define T_OVERFLOW 4 /* overflow test */
+#define T_OUT_OF_BOUNDS 5 /* bounds check */
+#define T_INVALID_OPCODE 6 /* invalid op code */
+#define T_NO_FPU 7 /* no floating point */
+#define T_DOUBLE_FAULT 8 /* double fault */
+#define T_FPU_FAULT 9
+/* 10 */
+#define T_SEGMENT_NOT_PRESENT 11
+#define T_STACK_FAULT 12
+#define T_GENERAL_PROTECTION 13
+#define T_PAGE_FAULT 14
+/* 15 */
+#define T_FLOATING_POINT_ERROR 16
+#define T_WATCHPOINT 17
+
+/*
+ * Page-fault trap codes.
+ */
+#define T_PF_PROT 0x1 /* protection violation */
+#define T_PF_WRITE 0x2 /* write access */
+#define T_PF_USER 0x4 /* from user state */
+
+
+#endif /* _MACH_RISCV_TRAP_H_ */
diff --git a/riscv/include/mach/riscv/vm_param.h b/riscv/include/mach/riscv/vm_param.h
new file mode 100644
index 0000000..3e5c18c
--- /dev/null
+++ b/riscv/include/mach/riscv/vm_param.h
@@ -0,0 +1,90 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm_param.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * I386 machine dependent virtual memory parameters.
+ * Most of the declarations are preceded by I386_ (or i386_)
+ * which is OK because only I386 specific code will be using
+ * them.
+ */
+
+#ifndef _MACH_I386_VM_PARAM_H_
+#define _MACH_I386_VM_PARAM_H_
+
+#include <mach/machine/vm_types.h>
+
+#define BYTE_SIZE 8 /* byte size in bits */
+
+#define I386_PGBYTES 4096 /* bytes per 80386 page */
+#define I386_PGSHIFT 12 /* number of bits to shift for pages */
+
+/* Virtual page size is the same as real page size - 4K is big enough. */
+#define PAGE_SHIFT I386_PGSHIFT
+
+/*
+ * Convert bytes to pages and convert pages to bytes.
+ * No rounding is used.
+ */
+
+#define i386_btop(x) (((phys_addr_t)(x)) >> I386_PGSHIFT)
+#define i386_ptob(x) (((phys_addr_t)(x)) << I386_PGSHIFT)
+
+/*
+ * Round off or truncate to the nearest page. These will work
+ * for either addresses or counts. (i.e. 1 byte rounds to 1 page
+ * bytes.)
+ */
+
+#define i386_round_page(x) ((((phys_addr_t)(x)) + I386_PGBYTES - 1) & \
+ ~(I386_PGBYTES-1))
+#define i386_trunc_page(x) (((phys_addr_t)(x)) & ~(I386_PGBYTES-1))
+
+/* User address spaces are 3GB each on a 32-bit kernel, starting at
+ virtual and linear address 0.
+ On a 64-bit krenel we split the address space in half, with the
+ lower 128TB for the user address space and the upper 128TB for the
+ kernel address space.
+
+ On a 32-bit kernel VM_MAX_ADDRESS can be reduced to leave more
+ space for the kernel, but must not be increased to more than 3GB as
+ glibc and hurd servers would not cope with that.
+ */
+#define VM_MIN_ADDRESS (0ULL)
+
+#ifdef __x86_64__
+#if defined(KERNEL) && defined(USER32)
+#define VM_MAX_ADDRESS (0xc0000000UL)
+#else /* defined(KERNEL) && defined(USER32) */
+#define VM_MAX_ADDRESS (0x800000000000ULL)
+#endif /* defined(KERNEL) && defined(USER32) */
+#else /* __x86_64__ */
+#define VM_MAX_ADDRESS (0xc0000000UL)
+#endif /* __x86_64__ */
+
+#endif /* _MACH_I386_VM_PARAM_H_ */
diff --git a/riscv/include/mach/riscv/vm_types.h b/riscv/include/mach/riscv/vm_types.h
new file mode 100644
index 0000000..8f528ae
--- /dev/null
+++ b/riscv/include/mach/riscv/vm_types.h
@@ -0,0 +1,173 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992,1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: vm_types.h
+ * Author: Avadis Tevanian, Jr.
+ * Date: 1985
+ *
+ * Header file for VM data types. I386 version.
+ */
+
+#ifndef _MACHINE_VM_TYPES_H_
+#define _MACHINE_VM_TYPES_H_ 1
+
+#ifdef __ASSEMBLER__
+#else /* __ASSEMBLER__ */
+
+#include <stdint.h>
+
+#ifdef MACH_KERNEL
+#include <kern/assert.h>
+#endif
+
+/*
+ * A natural_t is the type for the native
+ * unsigned integer type, usually 32 bits. It is suitable for
+ * most counters with a small chance of overflow.
+ * While historically natural_t was meant to be the same
+ * as a pointer, that is not the case here.
+ */
+typedef unsigned int natural_t;
+
+/*
+ * An integer_t is the signed counterpart
+ * of the natural_t type. Both types are
+ * only supposed to be used to define
+ * other types in a machine-independent
+ * way.
+ */
+typedef int integer_t;
+
+/*
+ * A long_natural_t is a possibly larger unsigned integer type than natural_t.
+ * Should be used instead of natural_t when we want the data to be less subject
+ * to overflows.
+ */
+typedef unsigned long long_natural_t;
+
+/*
+ * Larger version of integer_t. Only used when we want to hold possibly larger
+ * values than what is possible with integer_t.
+ */
+typedef long long_integer_t;
+
+/*
+ * A vm_offset_t is a type-neutral pointer,
+ * e.g. an offset into a virtual memory space.
+ */
+typedef uintptr_t vm_offset_t;
+typedef vm_offset_t * vm_offset_array_t;
+
+/*
+ * A type for physical addresses.
+ */
+#ifdef MACH_KERNEL
+#ifdef PAE
+typedef unsigned long long phys_addr_t;
+#else /* PAE */
+typedef unsigned long phys_addr_t;
+#endif /* PAE */
+#else
+typedef unsigned long long phys_addr_t;
+#endif
+typedef unsigned long long rpc_phys_addr_t;
+typedef rpc_phys_addr_t *rpc_phys_addr_array_t;
+
+/*
+ * A vm_size_t is the proper type for e.g.
+ * expressing the difference between two
+ * vm_offset_t entities.
+ */
+typedef uintptr_t vm_size_t;
+typedef vm_size_t * vm_size_array_t;
+
+/*
+ * rpc_types are for user/kernel interfaces. On kernel side they may differ from
+ * the native types, while on user space they shall be the same.
+ * These three types are always of the same size, so we can reuse the conversion
+ * functions.
+ */
+#if defined(MACH_KERNEL) && defined(USER32)
+typedef uint32_t rpc_uintptr_t;
+typedef uint32_t rpc_vm_address_t;
+typedef uint32_t rpc_vm_offset_t;
+typedef uint32_t rpc_vm_size_t;
+
+static inline uint64_t convert_vm_from_user(uint32_t uaddr)
+{
+ return (uint64_t)uaddr;
+}
+static inline uint32_t convert_vm_to_user(uint64_t kaddr)
+{
+ assert(kaddr <= 0xFFFFFFFF);
+ return (uint32_t)kaddr;
+}
+
+typedef uint32_t rpc_long_natural_t;
+typedef int32_t rpc_long_integer_t;
+
+static inline int64_t convert_long_integer_from_user(int32_t i)
+{
+ return (int64_t)i;
+}
+static inline int32_t convert_long_integer_to_user(int64_t i)
+{
+ assert(i <= 0x7FFFFFFF);
+ return (int32_t)i;
+}
+typedef uint32_t rpc_long_natural_t;
+typedef int32_t rpc_long_integer_t;
+#else /* MACH_KERNEL */
+typedef uintptr_t rpc_uintptr_t;
+typedef vm_offset_t rpc_vm_address_t;
+typedef vm_offset_t rpc_vm_offset_t;
+typedef vm_size_t rpc_vm_size_t;
+
+#define convert_vm_to_user null_conversion
+#define convert_vm_from_user null_conversion
+
+typedef long_natural_t rpc_long_natural_t;
+typedef long_integer_t rpc_long_integer_t;
+
+#define convert_long_integer_to_user null_conversion
+#define convert_long_integer_from_user null_conversion
+#endif /* MACH_KERNEL */
+
+#define convert_long_natural_to_user convert_vm_to_user
+#define convert_long_natural_from_user convert_vm_from_user
+
+typedef rpc_vm_size_t * rpc_vm_size_array_t;
+typedef rpc_vm_offset_t * rpc_vm_offset_array_t;
+
+#endif /* __ASSEMBLER__ */
+
+/*
+ * If composing messages by hand (please dont)
+ */
+
+#define MACH_MSG_TYPE_INTEGER_T MACH_MSG_TYPE_INTEGER_32
+
+#endif /* _MACHINE_VM_TYPES_H_ */
diff --git a/riscv/include/uapi/asm/Kbuild b/riscv/include/uapi/asm/Kbuild
new file mode 100644
index 0000000..f66554c
--- /dev/null
+++ b/riscv/include/uapi/asm/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0
diff --git a/riscv/include/uapi/asm/auxvec.h b/riscv/include/uapi/asm/auxvec.h
new file mode 100644
index 0000000..10aaa83
--- /dev/null
+++ b/riscv/include/uapi/asm/auxvec.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _UAPI_ASM_RISCV_AUXVEC_H
+#define _UAPI_ASM_RISCV_AUXVEC_H
+
+/* vDSO location */
+#define AT_SYSINFO_EHDR 33
+
+/*
+ * The set of entries below represent more extensive information
+ * about the caches, in the form of two entry per cache type,
+ * one entry containing the cache size in bytes, and the other
+ * containing the cache line size in bytes in the bottom 16 bits
+ * and the cache associativity in the next 16 bits.
+ *
+ * The associativity is such that if N is the 16-bit value, the
+ * cache is N way set associative. A value if 0xffff means fully
+ * associative, a value of 1 means directly mapped.
+ *
+ * For all these fields, a value of 0 means that the information
+ * is not known.
+ */
+#define AT_L1I_CACHESIZE 40
+#define AT_L1I_CACHEGEOMETRY 41
+#define AT_L1D_CACHESIZE 42
+#define AT_L1D_CACHEGEOMETRY 43
+#define AT_L2_CACHESIZE 44
+#define AT_L2_CACHEGEOMETRY 45
+#define AT_L3_CACHESIZE 46
+#define AT_L3_CACHEGEOMETRY 47
+
+/* entries in ARCH_DLINFO */
+#define AT_VECTOR_SIZE_ARCH 9
+#define AT_MINSIGSTKSZ 51
+
+#endif /* _UAPI_ASM_RISCV_AUXVEC_H */
diff --git a/riscv/include/uapi/asm/bitsperlong.h b/riscv/include/uapi/asm/bitsperlong.h
new file mode 100644
index 0000000..7d0b32e
--- /dev/null
+++ b/riscv/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
+#define _UAPI_ASM_RISCV_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
diff --git a/riscv/include/uapi/asm/bpf_perf_event.h b/riscv/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 0000000..6cb1c28
--- /dev/null
+++ b/riscv/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_regs_struct bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/riscv/include/uapi/asm/byteorder.h b/riscv/include/uapi/asm/byteorder.h
new file mode 100644
index 0000000..f671e16
--- /dev/null
+++ b/riscv/include/uapi/asm/byteorder.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _UAPI_ASM_RISCV_BYTEORDER_H
+#define _UAPI_ASM_RISCV_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _UAPI_ASM_RISCV_BYTEORDER_H */
diff --git a/riscv/include/uapi/asm/elf.h b/riscv/include/uapi/asm/elf.h
new file mode 100644
index 0000000..11a71b8
--- /dev/null
+++ b/riscv/include/uapi/asm/elf.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UAPI_ASM_RISCV_ELF_H
+#define _UAPI_ASM_RISCV_ELF_H
+
+#include <asm/ptrace.h>
+
+/* ELF register definitions */
+typedef unsigned long elf_greg_t;
+typedef struct user_regs_struct elf_gregset_t;
+#define ELF_NGREG (sizeof(elf_gregset_t) / sizeof(elf_greg_t))
+
+/* We don't support f without d, or q. */
+typedef __u64 elf_fpreg_t;
+typedef union __riscv_fp_state elf_fpregset_t;
+#define ELF_NFPREG (sizeof(struct __riscv_d_ext_state) / sizeof(elf_fpreg_t))
+
+#if __riscv_xlen == 64
+#define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info) ELF64_R_TYPE(r_info)
+#else
+#define ELF_RISCV_R_SYM(r_info) ELF32_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info) ELF32_R_TYPE(r_info)
+#endif
+
+/*
+ * RISC-V relocation types
+ */
+
+/* Relocation types used by the dynamic linker */
+#define R_RISCV_NONE 0
+#define R_RISCV_32 1
+#define R_RISCV_64 2
+#define R_RISCV_RELATIVE 3
+#define R_RISCV_COPY 4
+#define R_RISCV_JUMP_SLOT 5
+#define R_RISCV_TLS_DTPMOD32 6
+#define R_RISCV_TLS_DTPMOD64 7
+#define R_RISCV_TLS_DTPREL32 8
+#define R_RISCV_TLS_DTPREL64 9
+#define R_RISCV_TLS_TPREL32 10
+#define R_RISCV_TLS_TPREL64 11
+#define R_RISCV_IRELATIVE 58
+
+/* Relocation types not used by the dynamic linker */
+#define R_RISCV_BRANCH 16
+#define R_RISCV_JAL 17
+#define R_RISCV_CALL 18
+#define R_RISCV_CALL_PLT 19
+#define R_RISCV_GOT_HI20 20
+#define R_RISCV_TLS_GOT_HI20 21
+#define R_RISCV_TLS_GD_HI20 22
+#define R_RISCV_PCREL_HI20 23
+#define R_RISCV_PCREL_LO12_I 24
+#define R_RISCV_PCREL_LO12_S 25
+#define R_RISCV_HI20 26
+#define R_RISCV_LO12_I 27
+#define R_RISCV_LO12_S 28
+#define R_RISCV_TPREL_HI20 29
+#define R_RISCV_TPREL_LO12_I 30
+#define R_RISCV_TPREL_LO12_S 31
+#define R_RISCV_TPREL_ADD 32
+#define R_RISCV_ADD8 33
+#define R_RISCV_ADD16 34
+#define R_RISCV_ADD32 35
+#define R_RISCV_ADD64 36
+#define R_RISCV_SUB8 37
+#define R_RISCV_SUB16 38
+#define R_RISCV_SUB32 39
+#define R_RISCV_SUB64 40
+#define R_RISCV_GNU_VTINHERIT 41
+#define R_RISCV_GNU_VTENTRY 42
+#define R_RISCV_ALIGN 43
+#define R_RISCV_RVC_BRANCH 44
+#define R_RISCV_RVC_JUMP 45
+#define R_RISCV_GPREL_I 47
+#define R_RISCV_GPREL_S 48
+#define R_RISCV_TPREL_I 49
+#define R_RISCV_TPREL_S 50
+#define R_RISCV_RELAX 51
+#define R_RISCV_SUB6 52
+#define R_RISCV_SET6 53
+#define R_RISCV_SET8 54
+#define R_RISCV_SET16 55
+#define R_RISCV_SET32 56
+#define R_RISCV_32_PCREL 57
+#define R_RISCV_PLT32 59
+#define R_RISCV_SET_ULEB128 60
+#define R_RISCV_SUB_ULEB128 61
+
+
+#endif /* _UAPI_ASM_RISCV_ELF_H */
diff --git a/riscv/include/uapi/asm/hwcap.h b/riscv/include/uapi/asm/hwcap.h
new file mode 100644
index 0000000..c52bb7b
--- /dev/null
+++ b/riscv/include/uapi/asm/hwcap.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copied from arch/arm64/include/asm/hwcap.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2017 SiFive
+ */
+#ifndef _UAPI_ASM_RISCV_HWCAP_H
+#define _UAPI_ASM_RISCV_HWCAP_H
+
+/*
+ * Linux saves the floating-point registers according to the ISA Linux is
+ * executing on, as opposed to the ISA the user program is compiled for. This
+ * is necessary for a handful of esoteric use cases: for example, userspace
+ * threading libraries must be able to examine the actual machine state in
+ * order to fully reconstruct the state of a thread.
+ */
+#define COMPAT_HWCAP_ISA_I (1 << ('I' - 'A'))
+#define COMPAT_HWCAP_ISA_M (1 << ('M' - 'A'))
+#define COMPAT_HWCAP_ISA_A (1 << ('A' - 'A'))
+#define COMPAT_HWCAP_ISA_F (1 << ('F' - 'A'))
+#define COMPAT_HWCAP_ISA_D (1 << ('D' - 'A'))
+#define COMPAT_HWCAP_ISA_C (1 << ('C' - 'A'))
+#define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A'))
+
+#endif /* _UAPI_ASM_RISCV_HWCAP_H */
diff --git a/riscv/include/uapi/asm/hwprobe.h b/riscv/include/uapi/asm/hwprobe.h
new file mode 100644
index 0000000..b659ffc
--- /dev/null
+++ b/riscv/include/uapi/asm/hwprobe.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright 2023 Rivos, Inc
+ */
+
+#ifndef _UAPI_ASM_HWPROBE_H
+#define _UAPI_ASM_HWPROBE_H
+
+#include <linux/types.h>
+
+/*
+ * Interface for probing hardware capabilities from userspace, see
+ * Documentation/arch/riscv/hwprobe.rst for more information.
+ */
+struct riscv_hwprobe {
+ __s64 key;
+ __u64 value;
+};
+
+#define RISCV_HWPROBE_KEY_MVENDORID 0
+#define RISCV_HWPROBE_KEY_MARCHID 1
+#define RISCV_HWPROBE_KEY_MIMPID 2
+#define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
+#define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
+#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
+#define RISCV_HWPROBE_IMA_FD (1 << 0)
+#define RISCV_HWPROBE_IMA_C (1 << 1)
+#define RISCV_HWPROBE_IMA_V (1 << 2)
+#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
+#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
+#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
+#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6)
+#define RISCV_HWPROBE_KEY_CPUPERF_0 5
+#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
+#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
+#define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
+#define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
+#define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
+#define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
+#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
+/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
+
+#endif
diff --git a/riscv/include/uapi/asm/kvm.h b/riscv/include/uapi/asm/kvm.h
new file mode 100644
index 0000000..60d3b21
--- /dev/null
+++ b/riscv/include/uapi/asm/kvm.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#ifndef __LINUX_KVM_RISCV_H
+#define __LINUX_KVM_RISCV_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/bitsperlong.h>
+#include <asm/ptrace.h>
+
+#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+#define KVM_INTERRUPT_SET -1U
+#define KVM_INTERRUPT_UNSET -2U
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+};
+
+/* KVM Debug exit structure */
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+};
+
+/* CONFIG registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_config {
+ unsigned long isa;
+ unsigned long zicbom_block_size;
+ unsigned long mvendorid;
+ unsigned long marchid;
+ unsigned long mimpid;
+ unsigned long zicboz_block_size;
+ unsigned long satp_mode;
+};
+
+/* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_core {
+ struct user_regs_struct regs;
+ unsigned long mode;
+};
+
+/* Possible privilege modes for kvm_riscv_core */
+#define KVM_RISCV_MODE_S 1
+#define KVM_RISCV_MODE_U 0
+
+/* General CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_csr {
+ unsigned long sstatus;
+ unsigned long sie;
+ unsigned long stvec;
+ unsigned long sscratch;
+ unsigned long sepc;
+ unsigned long scause;
+ unsigned long stval;
+ unsigned long sip;
+ unsigned long satp;
+ unsigned long scounteren;
+ unsigned long senvcfg;
+};
+
+/* AIA CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_aia_csr {
+ unsigned long siselect;
+ unsigned long iprio1;
+ unsigned long iprio2;
+ unsigned long sieh;
+ unsigned long siph;
+ unsigned long iprio1h;
+ unsigned long iprio2h;
+};
+
+/* Smstateen CSR for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_smstateen_csr {
+ unsigned long sstateen0;
+};
+
+/* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_timer {
+ __u64 frequency;
+ __u64 time;
+ __u64 compare;
+ __u64 state;
+};
+
+/*
+ * ISA extension IDs specific to KVM. This is not the same as the host ISA
+ * extension IDs as that is internal to the host and should not be exposed
+ * to the guest. This should always be contiguous to keep the mapping simple
+ * in KVM implementation.
+ */
+enum KVM_RISCV_ISA_EXT_ID {
+ KVM_RISCV_ISA_EXT_A = 0,
+ KVM_RISCV_ISA_EXT_C,
+ KVM_RISCV_ISA_EXT_D,
+ KVM_RISCV_ISA_EXT_F,
+ KVM_RISCV_ISA_EXT_H,
+ KVM_RISCV_ISA_EXT_I,
+ KVM_RISCV_ISA_EXT_M,
+ KVM_RISCV_ISA_EXT_SVPBMT,
+ KVM_RISCV_ISA_EXT_SSTC,
+ KVM_RISCV_ISA_EXT_SVINVAL,
+ KVM_RISCV_ISA_EXT_ZIHINTPAUSE,
+ KVM_RISCV_ISA_EXT_ZICBOM,
+ KVM_RISCV_ISA_EXT_ZICBOZ,
+ KVM_RISCV_ISA_EXT_ZBB,
+ KVM_RISCV_ISA_EXT_SSAIA,
+ KVM_RISCV_ISA_EXT_V,
+ KVM_RISCV_ISA_EXT_SVNAPOT,
+ KVM_RISCV_ISA_EXT_ZBA,
+ KVM_RISCV_ISA_EXT_ZBS,
+ KVM_RISCV_ISA_EXT_ZICNTR,
+ KVM_RISCV_ISA_EXT_ZICSR,
+ KVM_RISCV_ISA_EXT_ZIFENCEI,
+ KVM_RISCV_ISA_EXT_ZIHPM,
+ KVM_RISCV_ISA_EXT_SMSTATEEN,
+ KVM_RISCV_ISA_EXT_ZICOND,
+ KVM_RISCV_ISA_EXT_MAX,
+};
+
+/*
+ * SBI extension IDs specific to KVM. This is not the same as the SBI
+ * extension IDs defined by the RISC-V SBI specification.
+ */
+enum KVM_RISCV_SBI_EXT_ID {
+ KVM_RISCV_SBI_EXT_V01 = 0,
+ KVM_RISCV_SBI_EXT_TIME,
+ KVM_RISCV_SBI_EXT_IPI,
+ KVM_RISCV_SBI_EXT_RFENCE,
+ KVM_RISCV_SBI_EXT_SRST,
+ KVM_RISCV_SBI_EXT_HSM,
+ KVM_RISCV_SBI_EXT_PMU,
+ KVM_RISCV_SBI_EXT_EXPERIMENTAL,
+ KVM_RISCV_SBI_EXT_VENDOR,
+ KVM_RISCV_SBI_EXT_DBCN,
+ KVM_RISCV_SBI_EXT_MAX,
+};
+
+/* Possible states for kvm_riscv_timer */
+#define KVM_RISCV_TIMER_STATE_OFF 0
+#define KVM_RISCV_TIMER_STATE_ON 1
+
+#define KVM_REG_SIZE(id) \
+ (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+/* If you need to interpret the index values, here is the key: */
+#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000
+#define KVM_REG_RISCV_TYPE_SHIFT 24
+#define KVM_REG_RISCV_SUBTYPE_MASK 0x0000000000FF0000
+#define KVM_REG_RISCV_SUBTYPE_SHIFT 16
+
+/* Config registers are mapped as type 1 */
+#define KVM_REG_RISCV_CONFIG (0x01 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_CONFIG_REG(name) \
+ (offsetof(struct kvm_riscv_config, name) / sizeof(unsigned long))
+
+/* Core registers are mapped as type 2 */
+#define KVM_REG_RISCV_CORE (0x02 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_CORE_REG(name) \
+ (offsetof(struct kvm_riscv_core, name) / sizeof(unsigned long))
+
+/* Control and status registers are mapped as type 3 */
+#define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_CSR_GENERAL (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_CSR_AIA (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_CSR_SMSTATEEN (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_CSR_REG(name) \
+ (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_CSR_AIA_REG(name) \
+ (offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name) \
+ (offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long))
+
+/* Timer registers are mapped as type 4 */
+#define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_TIMER_REG(name) \
+ (offsetof(struct kvm_riscv_timer, name) / sizeof(__u64))
+
+/* F extension registers are mapped as type 5 */
+#define KVM_REG_RISCV_FP_F (0x05 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_FP_F_REG(name) \
+ (offsetof(struct __riscv_f_ext_state, name) / sizeof(__u32))
+
+/* D extension registers are mapped as type 6 */
+#define KVM_REG_RISCV_FP_D (0x06 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_FP_D_REG(name) \
+ (offsetof(struct __riscv_d_ext_state, name) / sizeof(__u64))
+
+/* ISA Extension registers are mapped as type 7 */
+#define KVM_REG_RISCV_ISA_EXT (0x07 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_SINGLE (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_MULTI_EN (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_MULTI_DIS (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_MULTI_REG(__ext_id) \
+ ((__ext_id) / __BITS_PER_LONG)
+#define KVM_REG_RISCV_ISA_MULTI_MASK(__ext_id) \
+ (1UL << ((__ext_id) % __BITS_PER_LONG))
+#define KVM_REG_RISCV_ISA_MULTI_REG_LAST \
+ KVM_REG_RISCV_ISA_MULTI_REG(KVM_RISCV_ISA_EXT_MAX - 1)
+
+/* SBI extension registers are mapped as type 8 */
+#define KVM_REG_RISCV_SBI_EXT (0x08 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_SINGLE (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_MULTI_EN (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_MULTI_DIS (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_SBI_MULTI_REG(__ext_id) \
+ ((__ext_id) / __BITS_PER_LONG)
+#define KVM_REG_RISCV_SBI_MULTI_MASK(__ext_id) \
+ (1UL << ((__ext_id) % __BITS_PER_LONG))
+#define KVM_REG_RISCV_SBI_MULTI_REG_LAST \
+ KVM_REG_RISCV_SBI_MULTI_REG(KVM_RISCV_SBI_EXT_MAX - 1)
+
+/* V extension registers are mapped as type 9 */
+#define KVM_REG_RISCV_VECTOR (0x09 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_VECTOR_CSR_REG(name) \
+ (offsetof(struct __riscv_v_ext_state, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_VECTOR_REG(n) \
+ ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long))
+
+/* Device Control API: RISC-V AIA */
+#define KVM_DEV_RISCV_APLIC_ALIGN 0x1000
+#define KVM_DEV_RISCV_APLIC_SIZE 0x4000
+#define KVM_DEV_RISCV_APLIC_MAX_HARTS 0x4000
+#define KVM_DEV_RISCV_IMSIC_ALIGN 0x1000
+#define KVM_DEV_RISCV_IMSIC_SIZE 0x1000
+
+#define KVM_DEV_RISCV_AIA_GRP_CONFIG 0
+#define KVM_DEV_RISCV_AIA_CONFIG_MODE 0
+#define KVM_DEV_RISCV_AIA_CONFIG_IDS 1
+#define KVM_DEV_RISCV_AIA_CONFIG_SRCS 2
+#define KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS 3
+#define KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT 4
+#define KVM_DEV_RISCV_AIA_CONFIG_HART_BITS 5
+#define KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS 6
+
+/*
+ * Modes of RISC-V AIA device:
+ * 1) EMUL (aka Emulation): Trap-n-emulate IMSIC
+ * 2) HWACCEL (aka HW Acceleration): Virtualize IMSIC using IMSIC guest files
+ * 3) AUTO (aka Automatic): Virtualize IMSIC using IMSIC guest files whenever
+ * available otherwise fallback to trap-n-emulation
+ */
+#define KVM_DEV_RISCV_AIA_MODE_EMUL 0
+#define KVM_DEV_RISCV_AIA_MODE_HWACCEL 1
+#define KVM_DEV_RISCV_AIA_MODE_AUTO 2
+
+#define KVM_DEV_RISCV_AIA_IDS_MIN 63
+#define KVM_DEV_RISCV_AIA_IDS_MAX 2048
+#define KVM_DEV_RISCV_AIA_SRCS_MAX 1024
+#define KVM_DEV_RISCV_AIA_GROUP_BITS_MAX 8
+#define KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN 24
+#define KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX 56
+#define KVM_DEV_RISCV_AIA_HART_BITS_MAX 16
+#define KVM_DEV_RISCV_AIA_GUEST_BITS_MAX 8
+
+#define KVM_DEV_RISCV_AIA_GRP_ADDR 1
+#define KVM_DEV_RISCV_AIA_ADDR_APLIC 0
+#define KVM_DEV_RISCV_AIA_ADDR_IMSIC(__vcpu) (1 + (__vcpu))
+#define KVM_DEV_RISCV_AIA_ADDR_MAX \
+ (1 + KVM_DEV_RISCV_APLIC_MAX_HARTS)
+
+#define KVM_DEV_RISCV_AIA_GRP_CTRL 2
+#define KVM_DEV_RISCV_AIA_CTRL_INIT 0
+
+/*
+ * The device attribute type contains the memory mapped offset of the
+ * APLIC register (range 0x0000-0x3FFF) and it must be 4-byte aligned.
+ */
+#define KVM_DEV_RISCV_AIA_GRP_APLIC 3
+
+/*
+ * The lower 12-bits of the device attribute type contains the iselect
+ * value of the IMSIC register (range 0x70-0xFF) whereas the higher order
+ * bits contains the VCPU id.
+ */
+#define KVM_DEV_RISCV_AIA_GRP_IMSIC 4
+#define KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS 12
+#define KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK \
+ ((1U << KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS) - 1)
+#define KVM_DEV_RISCV_AIA_IMSIC_MKATTR(__vcpu, __isel) \
+ (((__vcpu) << KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS) | \
+ ((__isel) & KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK))
+#define KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(__attr) \
+ ((__attr) & KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK)
+#define KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(__attr) \
+ ((__attr) >> KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS)
+
+/* One single KVM irqchip, ie. the AIA */
+#define KVM_NR_IRQCHIPS 1
+
+#endif
+
+#endif /* __LINUX_KVM_RISCV_H */
diff --git a/riscv/include/uapi/asm/perf_regs.h b/riscv/include/uapi/asm/perf_regs.h
new file mode 100644
index 0000000..196f964
--- /dev/null
+++ b/riscv/include/uapi/asm/perf_regs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
+
+#ifndef _ASM_RISCV_PERF_REGS_H
+#define _ASM_RISCV_PERF_REGS_H
+
+enum perf_event_riscv_regs {
+ PERF_REG_RISCV_PC,
+ PERF_REG_RISCV_RA,
+ PERF_REG_RISCV_SP,
+ PERF_REG_RISCV_GP,
+ PERF_REG_RISCV_TP,
+ PERF_REG_RISCV_T0,
+ PERF_REG_RISCV_T1,
+ PERF_REG_RISCV_T2,
+ PERF_REG_RISCV_S0,
+ PERF_REG_RISCV_S1,
+ PERF_REG_RISCV_A0,
+ PERF_REG_RISCV_A1,
+ PERF_REG_RISCV_A2,
+ PERF_REG_RISCV_A3,
+ PERF_REG_RISCV_A4,
+ PERF_REG_RISCV_A5,
+ PERF_REG_RISCV_A6,
+ PERF_REG_RISCV_A7,
+ PERF_REG_RISCV_S2,
+ PERF_REG_RISCV_S3,
+ PERF_REG_RISCV_S4,
+ PERF_REG_RISCV_S5,
+ PERF_REG_RISCV_S6,
+ PERF_REG_RISCV_S7,
+ PERF_REG_RISCV_S8,
+ PERF_REG_RISCV_S9,
+ PERF_REG_RISCV_S10,
+ PERF_REG_RISCV_S11,
+ PERF_REG_RISCV_T3,
+ PERF_REG_RISCV_T4,
+ PERF_REG_RISCV_T5,
+ PERF_REG_RISCV_T6,
+ PERF_REG_RISCV_MAX,
+};
+#endif /* _ASM_RISCV_PERF_REGS_H */
diff --git a/riscv/include/uapi/asm/ptrace.h b/riscv/include/uapi/asm/ptrace.h
new file mode 100644
index 0000000..a38268b
--- /dev/null
+++ b/riscv/include/uapi/asm/ptrace.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _UAPI_ASM_RISCV_PTRACE_H
+#define _UAPI_ASM_RISCV_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#define PTRACE_GETFDPIC 33
+
+#define PTRACE_GETFDPIC_EXEC 0
+#define PTRACE_GETFDPIC_INTERP 1
+
+/*
+ * User-mode register state for core dumps, ptrace, sigcontext
+ *
+ * This decouples struct pt_regs from the userspace ABI.
+ * struct user_regs_struct must form a prefix of struct pt_regs.
+ */
+struct user_regs_struct {
+ unsigned long pc;
+ unsigned long ra;
+ unsigned long sp;
+ unsigned long gp;
+ unsigned long tp;
+ unsigned long t0;
+ unsigned long t1;
+ unsigned long t2;
+ unsigned long s0;
+ unsigned long s1;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long s2;
+ unsigned long s3;
+ unsigned long s4;
+ unsigned long s5;
+ unsigned long s6;
+ unsigned long s7;
+ unsigned long s8;
+ unsigned long s9;
+ unsigned long s10;
+ unsigned long s11;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+};
+
+struct __riscv_f_ext_state {
+ __u32 f[32];
+ __u32 fcsr;
+};
+
+struct __riscv_d_ext_state {
+ __u64 f[32];
+ __u32 fcsr;
+};
+
+struct __riscv_q_ext_state {
+ __u64 f[64] __attribute__((aligned(16)));
+ __u32 fcsr;
+ /*
+ * Reserved for expansion of sigcontext structure. Currently zeroed
+ * upon signal, and must be zero upon sigreturn.
+ */
+ __u32 reserved[3];
+};
+
+struct __riscv_ctx_hdr {
+ __u32 magic;
+ __u32 size;
+};
+
+struct __riscv_extra_ext_header {
+ __u32 __padding[129] __attribute__((aligned(16)));
+ /*
+ * Reserved for expansion of sigcontext structure. Currently zeroed
+ * upon signal, and must be zero upon sigreturn.
+ */
+ __u32 reserved;
+ struct __riscv_ctx_hdr hdr;
+};
+
+union __riscv_fp_state {
+ struct __riscv_f_ext_state f;
+ struct __riscv_d_ext_state d;
+ struct __riscv_q_ext_state q;
+};
+
+struct __riscv_v_ext_state {
+ unsigned long vstart;
+ unsigned long vl;
+ unsigned long vtype;
+ unsigned long vcsr;
+ unsigned long vlenb;
+ void *datap;
+ /*
+ * In signal handler, datap will be set a correct user stack offset
+ * and vector registers will be copied to the address of datap
+ * pointer.
+ */
+};
+
+struct __riscv_v_regset_state {
+ unsigned long vstart;
+ unsigned long vl;
+ unsigned long vtype;
+ unsigned long vcsr;
+ unsigned long vlenb;
+ char vreg[];
+};
+
+/*
+ * According to spec: The number of bits in a single vector register,
+ * VLEN >= ELEN, which must be a power of 2, and must be no greater than
+ * 2^16 = 65536bits = 8192bytes
+ */
+#define RISCV_MAX_VLENB (8192)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_ASM_RISCV_PTRACE_H */
diff --git a/riscv/include/uapi/asm/setup.h b/riscv/include/uapi/asm/setup.h
new file mode 100644
index 0000000..66b13a5
--- /dev/null
+++ b/riscv/include/uapi/asm/setup.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+
+#ifndef _UAPI_ASM_RISCV_SETUP_H
+#define _UAPI_ASM_RISCV_SETUP_H
+
+#define COMMAND_LINE_SIZE 1024
+
+#endif /* _UAPI_ASM_RISCV_SETUP_H */
diff --git a/riscv/include/uapi/asm/sigcontext.h b/riscv/include/uapi/asm/sigcontext.h
new file mode 100644
index 0000000..cd4f175
--- /dev/null
+++ b/riscv/include/uapi/asm/sigcontext.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _UAPI_ASM_RISCV_SIGCONTEXT_H
+#define _UAPI_ASM_RISCV_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/* The Magic number for signal context frame header. */
+#define RISCV_V_MAGIC 0x53465457
+#define END_MAGIC 0x0
+
+/* The size of END signal context header. */
+#define END_HDR_SIZE 0x0
+
+#ifndef __ASSEMBLY__
+
+struct __sc_riscv_v_state {
+ struct __riscv_v_ext_state v_state;
+} __attribute__((aligned(16)));
+
+/*
+ * Signal context structure
+ *
+ * This contains the context saved before a signal handler is invoked;
+ * it is restored by sys_rt_sigreturn.
+ */
+struct sigcontext {
+ struct user_regs_struct sc_regs;
+ union {
+ union __riscv_fp_state sc_fpregs;
+ struct __riscv_extra_ext_header sc_extdesc;
+ };
+};
+
+#endif /*!__ASSEMBLY__*/
+
+#endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */
diff --git a/riscv/include/uapi/asm/ucontext.h b/riscv/include/uapi/asm/ucontext.h
new file mode 100644
index 0000000..516bd0b
--- /dev/null
+++ b/riscv/include/uapi/asm/ucontext.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2017 SiFive, Inc.
+ *
+ * This file was copied from arch/arm64/include/uapi/asm/ucontext.h
+ */
+#ifndef _UAPI_ASM_RISCV_UCONTEXT_H
+#define _UAPI_ASM_RISCV_UCONTEXT_H
+
+#include <linux/types.h>
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ sigset_t uc_sigmask;
+ /*
+ * There's some padding here to allow sigset_t to be expanded in the
+ * future. Though this is unlikely, other architectures put uc_sigmask
+ * at the end of this structure and explicitly state it can be
+ * expanded, so we didn't want to box ourselves in here.
+ */
+ __u8 __unused[1024 / 8 - sizeof(sigset_t)];
+ /*
+ * We can't put uc_sigmask at the end of this structure because we need
+ * to be able to expand sigcontext in the future. For example, the
+ * vector ISA extension will almost certainly add ISA state. We want
+ * to ensure all user-visible ISA state can be saved and restored via a
+ * ucontext, so we're putting this at the end in order to allow for
+ * infinite extensibility. Since we know this will be extended and we
+ * assume sigset_t won't be extended an extreme amount, we're
+ * prioritizing this.
+ */
+ struct sigcontext uc_mcontext;
+};
+
+#endif /* _UAPI_ASM_RISCV_UCONTEXT_H */
diff --git a/riscv/include/uapi/asm/unistd.h b/riscv/include/uapi/asm/unistd.h
new file mode 100644
index 0000000..950ab3f
--- /dev/null
+++ b/riscv/include/uapi/asm/unistd.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#if defined(__LP64__) && !defined(__SYSCALL_COMPAT)
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#endif /* __LP64__ */
+
+#define __ARCH_WANT_SYS_CLONE3
+#define __ARCH_WANT_MEMFD_SECRET
+
+#include <asm-generic/unistd.h>
+
+/*
+ * Allows the instruction cache to be flushed from userspace. Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart. There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller. We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+#ifndef __NR_riscv_flush_icache
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+#endif
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
+
+/*
+ * Allows userspace to query the kernel for CPU architecture and
+ * microarchitecture details across a given set of CPUs.
+ */
+#ifndef __NR_riscv_hwprobe
+#define __NR_riscv_hwprobe (__NR_arch_specific_syscall + 14)
+#endif
+__SYSCALL(__NR_riscv_hwprobe, sys_riscv_hwprobe)
diff --git a/riscv/intel/pmap.c b/riscv/intel/pmap.c
new file mode 100644
index 0000000..f2122a1
--- /dev/null
+++ b/riscv/intel/pmap.c
@@ -0,0 +1,3322 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: pmap.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ * (These guys wrote the Vax version)
+ *
+ * Physical Map management code for Intel i386, and i486.
+ *
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include <string.h>
+
+#include <mach/machine/vm_types.h>
+
+#include <mach/boolean.h>
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <kern/slab.h>
+
+#include <kern/lock.h>
+
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <i386/vm_param.h>
+#include <mach/vm_prot.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_user.h>
+
+#include <mach/machine/vm_param.h>
+#include <mach/xen.h>
+#include <machine/thread.h>
+#include <i386/cpu_number.h>
+#include <i386/proc_reg.h>
+#include <i386/locore.h>
+#include <i386/model_dep.h>
+#include <i386/spl.h>
+#include <i386at/biosmem.h>
+#include <i386at/model_dep.h>
+
+#if NCPUS > 1
+#include <i386/mp_desc.h>
+#endif
+
+#include <ddb/db_output.h>
+#include <machine/db_machdep.h>
+
+#ifdef MACH_PSEUDO_PHYS
+#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = pte_entry?pa_to_ma(pte_entry):0;
+#else /* MACH_PSEUDO_PHYS */
+#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry);
+#endif /* MACH_PSEUDO_PHYS */
+
+/*
+ * Private data structures.
+ */
+
+/*
+ * For each vm_page_t, there is a list of all currently
+ * valid virtual mappings of that page. An entry is
+ * a pv_entry_t; the list is the pv_table.
+ */
+
+typedef struct pv_entry {
+ struct pv_entry *next; /* next pv_entry */
+ pmap_t pmap; /* pmap where mapping lies */
+ vm_offset_t va; /* virtual address for mapping */
+} *pv_entry_t;
+
+#define PV_ENTRY_NULL ((pv_entry_t) 0)
+
+pv_entry_t pv_head_table; /* array of entries, one per page */
+
+/*
+ * pv_list entries are kept on a list that can only be accessed
+ * with the pmap system locked (at SPLVM, not in the cpus_active set).
+ * The list is refilled from the pv_list_cache if it becomes empty.
+ */
+pv_entry_t pv_free_list; /* free list at SPLVM */
+def_simple_lock_data(static, pv_free_list_lock)
+
+#define PV_ALLOC(pv_e) { \
+ simple_lock(&pv_free_list_lock); \
+ if ((pv_e = pv_free_list) != 0) { \
+ pv_free_list = pv_e->next; \
+ } \
+ simple_unlock(&pv_free_list_lock); \
+}
+
+#define PV_FREE(pv_e) { \
+ simple_lock(&pv_free_list_lock); \
+ pv_e->next = pv_free_list; \
+ pv_free_list = pv_e; \
+ simple_unlock(&pv_free_list_lock); \
+}
+
+struct kmem_cache pv_list_cache; /* cache of pv_entry structures */
+
+/*
+ * Each entry in the pv_head_table is locked by a bit in the
+ * pv_lock_table. The lock bits are accessed by the physical
+ * address of the page they lock.
+ */
+
+char *pv_lock_table; /* pointer to array of bits */
+#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE)
+
+/* Has pmap_init completed? */
+boolean_t pmap_initialized = FALSE;
+
+/*
+ * Range of kernel virtual addresses available for kernel memory mapping.
+ * Does not include the virtual addresses used to map physical memory 1-1.
+ * Initialized by pmap_bootstrap.
+ */
+vm_offset_t kernel_virtual_start;
+vm_offset_t kernel_virtual_end;
+
+/*
+ * Index into pv_head table, its lock bits, and the modify/reference
+ * bits.
+ */
+#define pa_index(pa) vm_page_table_index(pa)
+
+#define pai_to_pvh(pai) (&pv_head_table[pai])
+#define lock_pvh_pai(pai) (bit_lock(pai, pv_lock_table))
+#define unlock_pvh_pai(pai) (bit_unlock(pai, pv_lock_table))
+
+/*
+ * Array of physical page attributes for managed pages.
+ * One byte per physical page.
+ */
+char *pmap_phys_attributes;
+
+/*
+ * Physical page attributes. Copy bits from PTE definition.
+ */
+#define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */
+#define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */
+
+/*
+ * Amount of virtual memory mapped by one
+ * page-directory entry.
+ */
+#define PDE_MAPPED_SIZE (pdenum2lin(1))
+
+/*
+ * We allocate page table pages directly from the VM system
+ * through this object. It maps physical memory.
+ */
+vm_object_t pmap_object = VM_OBJECT_NULL;
+
+/*
+ * Locking and TLB invalidation
+ */
+
+/*
+ * Locking Protocols:
+ *
+ * There are two structures in the pmap module that need locking:
+ * the pmaps themselves, and the per-page pv_lists (which are locked
+ * by locking the pv_lock_table entry that corresponds to the pv_head
+ * for the list in question.) Most routines want to lock a pmap and
+ * then do operations in it that require pv_list locking -- however
+ * pmap_remove_all and pmap_copy_on_write operate on a physical page
+ * basis and want to do the locking in the reverse order, i.e. lock
+ * a pv_list and then go through all the pmaps referenced by that list.
+ * To protect against deadlock between these two cases, the pmap_lock
+ * is used. There are three different locking protocols as a result:
+ *
+ * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
+ * the pmap.
+ *
+ * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
+ * lock on the pmap_lock (shared read), then lock the pmap
+ * and finally the pv_lists as needed [i.e. pmap lock before
+ * pv_list lock.]
+ *
+ * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
+ * Get a write lock on the pmap_lock (exclusive write); this
+ * also guaranteees exclusive access to the pv_lists. Lock the
+ * pmaps as needed.
+ *
+ * At no time may any routine hold more than one pmap lock or more than
+ * one pv_list lock. Because interrupt level routines can allocate
+ * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
+ * kernel_pmap can only be held at splvm.
+ */
+
+#if NCPUS > 1
+/*
+ * We raise the interrupt level to splvm, to block interprocessor
+ * interrupts during pmap operations. We must take the CPU out of
+ * the cpus_active set while interrupts are blocked.
+ */
+#define SPLVM(spl) { \
+ spl = splvm(); \
+ i_bit_clear(cpu_number(), &cpus_active); \
+}
+
+#define SPLX(spl) { \
+ i_bit_set(cpu_number(), &cpus_active); \
+ splx(spl); \
+}
+
+/*
+ * Lock on pmap system
+ */
+lock_data_t pmap_system_lock;
+
+#define PMAP_READ_LOCK(pmap, spl) { \
+ SPLVM(spl); \
+ lock_read(&pmap_system_lock); \
+ simple_lock(&(pmap)->lock); \
+}
+
+#define PMAP_WRITE_LOCK(spl) { \
+ SPLVM(spl); \
+ lock_write(&pmap_system_lock); \
+}
+
+#define PMAP_READ_UNLOCK(pmap, spl) { \
+ simple_unlock(&(pmap)->lock); \
+ lock_read_done(&pmap_system_lock); \
+ SPLX(spl); \
+}
+
+#define PMAP_WRITE_UNLOCK(spl) { \
+ lock_write_done(&pmap_system_lock); \
+ SPLX(spl); \
+}
+
+#define PMAP_WRITE_TO_READ_LOCK(pmap) { \
+ simple_lock(&(pmap)->lock); \
+ lock_write_to_read(&pmap_system_lock); \
+}
+
+#define LOCK_PVH(index) (lock_pvh_pai(index))
+
+#define UNLOCK_PVH(index) (unlock_pvh_pai(index))
+
+#define PMAP_UPDATE_TLBS(pmap, s, e) \
+{ \
+ cpu_set cpu_mask = 1 << cpu_number(); \
+ cpu_set users; \
+ \
+ /* Since the pmap is locked, other updates are locked */ \
+ /* out, and any pmap_activate has finished. */ \
+ \
+ /* find other cpus using the pmap */ \
+ users = (pmap)->cpus_using & ~cpu_mask; \
+ if (users) { \
+ /* signal them, and wait for them to finish */ \
+ /* using the pmap */ \
+ signal_cpus(users, (pmap), (s), (e)); \
+ while ((pmap)->cpus_using & cpus_active & ~cpu_mask) \
+ cpu_pause(); \
+ } \
+ \
+ /* invalidate our own TLB if pmap is in use */ \
+ if ((pmap)->cpus_using & cpu_mask) { \
+ INVALIDATE_TLB((pmap), (s), (e)); \
+ } \
+}
+
+#else /* NCPUS > 1 */
+
+#define SPLVM(spl) ((void)(spl))
+#define SPLX(spl) ((void)(spl))
+
+#define PMAP_READ_LOCK(pmap, spl) SPLVM(spl)
+#define PMAP_WRITE_LOCK(spl) SPLVM(spl)
+#define PMAP_READ_UNLOCK(pmap, spl) SPLX(spl)
+#define PMAP_WRITE_UNLOCK(spl) SPLX(spl)
+#define PMAP_WRITE_TO_READ_LOCK(pmap)
+
+#define LOCK_PVH(index)
+#define UNLOCK_PVH(index)
+
+#define PMAP_UPDATE_TLBS(pmap, s, e) { \
+ /* invalidate our own TLB if pmap is in use */ \
+ if ((pmap)->cpus_using) { \
+ INVALIDATE_TLB((pmap), (s), (e)); \
+ } \
+}
+
+#endif /* NCPUS > 1 */
+
+#ifdef MACH_PV_PAGETABLES
+#define INVALIDATE_TLB(pmap, s, e) do { \
+ if (__builtin_constant_p((e) - (s)) \
+ && (e) - (s) == PAGE_SIZE) \
+ hyp_invlpg((pmap) == kernel_pmap ? kvtolin(s) : (s)); \
+ else \
+ hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL); \
+} while(0)
+#else /* MACH_PV_PAGETABLES */
+/* It is hard to know when a TLB flush becomes less expensive than a bunch of
+ * invlpgs. But it surely is more expensive than just one invlpg. */
+#define INVALIDATE_TLB(pmap, s, e) do { \
+ if (__builtin_constant_p((e) - (s)) \
+ && (e) - (s) == PAGE_SIZE) \
+ invlpg_linear((pmap) == kernel_pmap ? kvtolin(s) : (s)); \
+ else \
+ flush_tlb(); \
+} while (0)
+#endif /* MACH_PV_PAGETABLES */
+
+
+#if NCPUS > 1
+/*
+ * Structures to keep track of pending TLB invalidations
+ */
+
+#define UPDATE_LIST_SIZE 4
+
+struct pmap_update_item {
+ pmap_t pmap; /* pmap to invalidate */
+ vm_offset_t start; /* start address to invalidate */
+ vm_offset_t end; /* end address to invalidate */
+} ;
+
+typedef struct pmap_update_item *pmap_update_item_t;
+
+/*
+ * List of pmap updates. If the list overflows,
+ * the last entry is changed to invalidate all.
+ */
+struct pmap_update_list {
+ decl_simple_lock_data(, lock)
+ int count;
+ struct pmap_update_item item[UPDATE_LIST_SIZE];
+} ;
+typedef struct pmap_update_list *pmap_update_list_t;
+
+struct pmap_update_list cpu_update_list[NCPUS];
+
+cpu_set cpus_active;
+cpu_set cpus_idle;
+volatile
+boolean_t cpu_update_needed[NCPUS];
+
+#endif /* NCPUS > 1 */
+
+/*
+ * Other useful macros.
+ */
+#define current_pmap() (vm_map_pmap(current_thread()->task->map))
+#define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0)
+
+struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+
+struct kmem_cache pmap_cache; /* cache of pmap structures */
+struct kmem_cache pt_cache; /* cache of page tables */
+struct kmem_cache pd_cache; /* cache of page directories */
+#if PAE
+struct kmem_cache pdpt_cache; /* cache of page directory pointer tables */
+#ifdef __x86_64__
+struct kmem_cache l4_cache; /* cache of L4 tables */
+#endif /* __x86_64__ */
+#endif /* PAE */
+
+boolean_t pmap_debug = FALSE; /* flag for debugging prints */
+
+#if 0
+int ptes_per_vm_page; /* number of hardware ptes needed
+ to map one VM page. */
+#else
+#define ptes_per_vm_page 1
+#endif
+
+unsigned int inuse_ptepages_count = 0; /* debugging */
+
+/*
+ * Pointer to the basic page directory for the kernel.
+ * Initialized by pmap_bootstrap().
+ */
+pt_entry_t *kernel_page_dir;
+
+/*
+ * Two slots for temporary physical page mapping, to allow for
+ * physical-to-physical transfers.
+ */
+static pmap_mapwindow_t mapwindows[PMAP_NMAPWINDOWS * NCPUS];
+#define MAPWINDOW_SIZE (PMAP_NMAPWINDOWS * NCPUS * PAGE_SIZE)
+
+#ifdef __x86_64__
+static inline pt_entry_t *
+pmap_l4base(const pmap_t pmap, vm_offset_t lin_addr)
+{
+ return &pmap->l4base[lin2l4num(lin_addr)];
+}
+#endif
+
+#ifdef PAE
+static inline pt_entry_t *
+pmap_ptp(const pmap_t pmap, vm_offset_t lin_addr)
+{
+ pt_entry_t *pdp_table;
+#ifdef __x86_64__
+ pt_entry_t *l4_table;
+ l4_table = pmap_l4base(pmap, lin_addr);
+ if (l4_table == PT_ENTRY_NULL)
+ return(PT_ENTRY_NULL);
+ pt_entry_t pdp = *l4_table;
+ if ((pdp & INTEL_PTE_VALID) == 0)
+ return PT_ENTRY_NULL;
+ pdp_table = (pt_entry_t *) ptetokv(pdp);
+#else /* __x86_64__ */
+ pdp_table = pmap->pdpbase;
+#endif /* __x86_64__ */
+ return &pdp_table[lin2pdpnum(lin_addr)];
+}
+#endif
+
+static inline pt_entry_t *
+pmap_pde(const pmap_t pmap, vm_offset_t addr)
+{
+ pt_entry_t *page_dir;
+ if (pmap == kernel_pmap)
+ addr = kvtolin(addr);
+#if PAE
+ pt_entry_t *pdp_table;
+ pdp_table = pmap_ptp(pmap, addr);
+ if (pdp_table == PT_ENTRY_NULL)
+ return(PT_ENTRY_NULL);
+ pt_entry_t pde = *pdp_table;
+ if ((pde & INTEL_PTE_VALID) == 0)
+ return PT_ENTRY_NULL;
+ page_dir = (pt_entry_t *) ptetokv(pde);
+#else /* PAE */
+ page_dir = pmap->dirbase;
+#endif /* PAE */
+ return &page_dir[lin2pdenum(addr)];
+}
+
+/*
+ * Given an offset and a map, compute the address of the
+ * pte. If the address is invalid with respect to the map
+ * then PT_ENTRY_NULL is returned (and the map may need to grow).
+ *
+ * This is only used internally.
+ */
+pt_entry_t *
+pmap_pte(const pmap_t pmap, vm_offset_t addr)
+{
+ pt_entry_t *ptp;
+ pt_entry_t pte;
+
+#ifdef __x86_64__
+ if (pmap->l4base == 0)
+ return(PT_ENTRY_NULL);
+#elif PAE
+ if (pmap->pdpbase == 0)
+ return(PT_ENTRY_NULL);
+#else
+ if (pmap->dirbase == 0)
+ return(PT_ENTRY_NULL);
+#endif
+ ptp = pmap_pde(pmap, addr);
+ if (ptp == 0)
+ return(PT_ENTRY_NULL);
+ pte = *ptp;
+ if ((pte & INTEL_PTE_VALID) == 0)
+ return(PT_ENTRY_NULL);
+ ptp = (pt_entry_t *)ptetokv(pte);
+ return(&ptp[ptenum(addr)]);
+}
+
+#define DEBUG_PTE_PAGE 0
+
+#if DEBUG_PTE_PAGE
+void ptep_check(ptep_t ptep)
+{
+ pt_entry_t *pte, *epte;
+ int ctu, ctw;
+
+ /* check the use and wired counts */
+ if (ptep == PTE_PAGE_NULL)
+ return;
+ pte = pmap_pte(ptep->pmap, ptep->va);
+ epte = pte + INTEL_PGBYTES/sizeof(pt_entry_t);
+ ctu = 0;
+ ctw = 0;
+ while (pte < epte) {
+ if (pte->pfn != 0) {
+ ctu++;
+ if (pte->wired)
+ ctw++;
+ }
+ pte += ptes_per_vm_page;
+ }
+
+ if (ctu != ptep->use_count || ctw != ptep->wired_count) {
+ printf("use %d wired %d - actual use %d wired %d\n",
+ ptep->use_count, ptep->wired_count, ctu, ctw);
+ panic("pte count");
+ }
+}
+#endif /* DEBUG_PTE_PAGE */
+
+/*
+ * Back-door routine for mapping kernel VM at initialization.
+ * Useful for mapping memory outside the range of direct mapped
+ * physical memory (i.e., devices).
+ */
+vm_offset_t pmap_map_bd(
+ vm_offset_t virt,
+ phys_addr_t start,
+ phys_addr_t end,
+ vm_prot_t prot)
+{
+ pt_entry_t template;
+ pt_entry_t *pte;
+ int spl;
+#ifdef MACH_PV_PAGETABLES
+ int n, i = 0;
+ struct mmu_update update[HYP_BATCH_MMU_UPDATES];
+#endif /* MACH_PV_PAGETABLES */
+
+ template = pa_to_pte(start)
+ | INTEL_PTE_NCACHE|INTEL_PTE_WTHRU
+ | INTEL_PTE_VALID;
+ if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
+ template |= INTEL_PTE_GLOBAL;
+ if (prot & VM_PROT_WRITE)
+ template |= INTEL_PTE_WRITE;
+
+ PMAP_READ_LOCK(kernel_pmap, spl);
+ while (start < end) {
+ pte = pmap_pte(kernel_pmap, virt);
+ if (pte == PT_ENTRY_NULL)
+ panic("pmap_map_bd: Invalid kernel address\n");
+#ifdef MACH_PV_PAGETABLES
+ update[i].ptr = kv_to_ma(pte);
+ update[i].val = pa_to_ma(template);
+ i++;
+ if (i == HYP_BATCH_MMU_UPDATES) {
+ hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
+ if (n != i)
+ panic("couldn't pmap_map_bd\n");
+ i = 0;
+ }
+#else /* MACH_PV_PAGETABLES */
+ WRITE_PTE(pte, template)
+#endif /* MACH_PV_PAGETABLES */
+ pte_increment_pa(template);
+ virt += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+#ifdef MACH_PV_PAGETABLES
+ if (i > HYP_BATCH_MMU_UPDATES)
+ panic("overflowed array in pmap_map_bd");
+ hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
+ if (n != i)
+ panic("couldn't pmap_map_bd\n");
+#endif /* MACH_PV_PAGETABLES */
+ PMAP_READ_UNLOCK(kernel_pmap, spl);
+ return(virt);
+}
+
+#ifdef PAE
+static void pmap_bootstrap_pae(void)
+{
+ vm_offset_t addr;
+ pt_entry_t *pdp_kernel;
+
+#ifdef __x86_64__
+#ifdef MACH_HYP
+ kernel_pmap->user_l4base = NULL;
+ kernel_pmap->user_pdpbase = NULL;
+#endif
+ kernel_pmap->l4base = (pt_entry_t*)phystokv(pmap_grab_page());
+ memset(kernel_pmap->l4base, 0, INTEL_PGBYTES);
+#else
+ const int PDPNUM_KERNEL = PDPNUM;
+#endif /* x86_64 */
+
+ init_alloc_aligned(PDPNUM_KERNEL * INTEL_PGBYTES, &addr);
+ kernel_page_dir = (pt_entry_t*)phystokv(addr);
+ memset(kernel_page_dir, 0, PDPNUM_KERNEL * INTEL_PGBYTES);
+
+ pdp_kernel = (pt_entry_t*)phystokv(pmap_grab_page());
+ memset(pdp_kernel, 0, INTEL_PGBYTES);
+ for (int i = 0; i < PDPNUM_KERNEL; i++) {
+ int pdp_index = i;
+#ifdef __x86_64__
+ pdp_index += lin2pdpnum(VM_MIN_KERNEL_ADDRESS);
+#endif
+ WRITE_PTE(&pdp_kernel[pdp_index],
+ pa_to_pte(_kvtophys((void *) kernel_page_dir
+ + i * INTEL_PGBYTES))
+ | INTEL_PTE_VALID
+#if (defined(__x86_64__) && !defined(MACH_HYP)) || defined(MACH_PV_PAGETABLES)
+ | INTEL_PTE_WRITE
+#endif
+ );
+ }
+
+#ifdef __x86_64__
+ /* only fill the kernel pdpte during bootstrap */
+ WRITE_PTE(&kernel_pmap->l4base[lin2l4num(VM_MIN_KERNEL_ADDRESS)],
+ pa_to_pte(_kvtophys(pdp_kernel)) | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+#ifdef MACH_PV_PAGETABLES
+ pmap_set_page_readonly_init(kernel_pmap->l4base);
+#endif /* MACH_PV_PAGETABLES */
+#else /* x86_64 */
+ kernel_pmap->pdpbase = pdp_kernel;
+#endif /* x86_64 */
+}
+#endif /* PAE */
+
+#ifdef MACH_PV_PAGETABLES
+#ifdef PAE
+#define NSUP_L1 4
+#else
+#define NSUP_L1 1
+#endif
+static void pmap_bootstrap_xen(pt_entry_t *l1_map[NSUP_L1])
+{
+ /* We don't actually deal with the CR3 register content at all */
+ hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);
+ /*
+ * Xen may only provide as few as 512KB extra bootstrap linear memory,
+ * which is far from enough to map all available memory, so we need to
+ * map more bootstrap linear memory. We here map 1 (resp. 4 for PAE)
+ * other L1 table(s), thus 4MiB extra memory (resp. 8MiB), which is
+ * enough for a pagetable mapping 4GiB.
+ */
+ vm_offset_t la;
+ int n_l1map;
+ for (n_l1map = 0, la = VM_MIN_KERNEL_ADDRESS; la >= VM_MIN_KERNEL_ADDRESS; la += NPTES * PAGE_SIZE) {
+ pt_entry_t *base = (pt_entry_t*) boot_info.pt_base;
+#ifdef PAE
+#ifdef __x86_64__
+ base = (pt_entry_t*) ptetokv(base[0]);
+#endif /* x86_64 */
+ pt_entry_t *l2_map = (pt_entry_t*) ptetokv(base[lin2pdpnum(la)]);
+#else /* PAE */
+ pt_entry_t *l2_map = base;
+#endif /* PAE */
+ /* Like lin2pdenum, but works with non-contiguous boot L3 */
+ l2_map += (la >> PDESHIFT) & PDEMASK;
+ if (!(*l2_map & INTEL_PTE_VALID)) {
+ struct mmu_update update;
+ unsigned j, n;
+
+ l1_map[n_l1map] = (pt_entry_t*) phystokv(pmap_grab_page());
+ for (j = 0; j < NPTES; j++)
+ l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn(lin2pdenum(la - VM_MIN_KERNEL_ADDRESS) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
+ pmap_set_page_readonly_init(l1_map[n_l1map]);
+ if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (l1_map[n_l1map])))
+ panic("couldn't pin page %p(%lx)", l1_map[n_l1map], (vm_offset_t) kv_to_ma (l1_map[n_l1map]));
+ update.ptr = kv_to_ma(l2_map);
+ update.val = kv_to_ma(l1_map[n_l1map]) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
+ hyp_mmu_update(kv_to_la(&update), 1, kv_to_la(&n), DOMID_SELF);
+ if (n != 1)
+ panic("couldn't complete bootstrap map");
+ /* added the last L1 table, can stop */
+ if (++n_l1map >= NSUP_L1)
+ break;
+ }
+ }
+}
+#endif /* MACH_PV_PAGETABLES */
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ * Allocate the kernel page directory and page tables,
+ * and direct-map all physical memory.
+ * Called with mapping off.
+ */
+void pmap_bootstrap(void)
+{
+ /*
+ * Mapping is turned off; we must reference only physical addresses.
+ * The load image of the system is to be mapped 1-1 physical = virtual.
+ */
+
+ /*
+ * Set ptes_per_vm_page for general use.
+ */
+#if 0
+ ptes_per_vm_page = PAGE_SIZE / INTEL_PGBYTES;
+#endif
+
+ /*
+ * The kernel's pmap is statically allocated so we don't
+ * have to use pmap_create, which is unlikely to work
+ * correctly at this part of the boot sequence.
+ */
+
+ kernel_pmap = &kernel_pmap_store;
+
+#if NCPUS > 1
+ lock_init(&pmap_system_lock, FALSE); /* NOT a sleep lock */
+#endif /* NCPUS > 1 */
+
+ simple_lock_init(&kernel_pmap->lock);
+
+ kernel_pmap->ref_count = 1;
+
+ /*
+ * Determine the kernel virtual address range.
+ * It starts at the end of the physical memory
+ * mapped into the kernel address space,
+ * and extends to a stupid arbitrary limit beyond that.
+ */
+ kernel_virtual_start = phystokv(biosmem_directmap_end());
+ kernel_virtual_end = kernel_virtual_start + VM_KERNEL_MAP_SIZE;
+
+ if (kernel_virtual_end < kernel_virtual_start
+ || kernel_virtual_end > VM_MAX_KERNEL_ADDRESS - PAGE_SIZE)
+ kernel_virtual_end = VM_MAX_KERNEL_ADDRESS - PAGE_SIZE;
+
+ /*
+ * Allocate and clear a kernel page directory.
+ */
+ /* Note: initial Xen mapping holds at least 512kB free mapped page.
+ * We use that for directly building our linear mapping. */
+#if PAE
+ pmap_bootstrap_pae();
+#else /* PAE */
+ kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(pmap_grab_page());
+ {
+ unsigned i;
+ for (i = 0; i < NPDES; i++)
+ kernel_page_dir[i] = 0;
+ }
+#endif /* PAE */
+
+#ifdef MACH_PV_PAGETABLES
+ pt_entry_t *l1_map[NSUP_L1];
+ pmap_bootstrap_xen(l1_map);
+#endif /* MACH_PV_PAGETABLES */
+
+ /*
+ * Allocate and set up the kernel page tables.
+ */
+ {
+ vm_offset_t va;
+ pt_entry_t global = CPU_HAS_FEATURE(CPU_FEATURE_PGE) ? INTEL_PTE_GLOBAL : 0;
+
+ /*
+ * Map virtual memory for all directly mappable physical memory, 1-1,
+ * Make any mappings completely in the kernel's text segment read-only.
+ *
+ * Also allocate some additional all-null page tables afterwards
+ * for kernel virtual memory allocation,
+ * because this PMAP module is too stupid
+ * to allocate new kernel page tables later.
+ * XX fix this
+ */
+ for (va = phystokv(0); va >= phystokv(0) && va < kernel_virtual_end; )
+ {
+ pt_entry_t *pde = kernel_page_dir + lin2pdenum_cont(kvtolin(va));
+ pt_entry_t *ptable = (pt_entry_t*)phystokv(pmap_grab_page());
+ pt_entry_t *pte;
+
+ /* Initialize the page directory entry. */
+ WRITE_PTE(pde, pa_to_pte((vm_offset_t)_kvtophys(ptable))
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+
+ /* Initialize the page table. */
+ for (pte = ptable; (va < phystokv(biosmem_directmap_end())) && (pte < ptable+NPTES); pte++)
+ {
+ if ((pte - ptable) < ptenum(va))
+ {
+ WRITE_PTE(pte, 0);
+ }
+ else
+#ifdef MACH_PV_PAGETABLES
+ if (va == (vm_offset_t) &hyp_shared_info)
+ {
+ *pte = boot_info.shared_info | INTEL_PTE_VALID | INTEL_PTE_WRITE;
+ va += INTEL_PGBYTES;
+ }
+ else
+#endif /* MACH_PV_PAGETABLES */
+ {
+ extern char _start[], etext[];
+
+ if (((va >= (vm_offset_t) _start)
+ && (va + INTEL_PGBYTES <= (vm_offset_t)etext))
+#ifdef MACH_PV_PAGETABLES
+ || (va >= (vm_offset_t) boot_info.pt_base
+ && (va + INTEL_PGBYTES <=
+ (vm_offset_t) ptable + INTEL_PGBYTES))
+#endif /* MACH_PV_PAGETABLES */
+ )
+ {
+ WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
+ | INTEL_PTE_VALID | global);
+ }
+ else
+ {
+#ifdef MACH_PV_PAGETABLES
+ /* Keep supplementary L1 pages read-only */
+ int i;
+ for (i = 0; i < NSUP_L1; i++)
+ if (va == (vm_offset_t) l1_map[i]) {
+ WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
+ | INTEL_PTE_VALID | global);
+ break;
+ }
+ if (i == NSUP_L1)
+#endif /* MACH_PV_PAGETABLES */
+ WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
+ | INTEL_PTE_VALID | INTEL_PTE_WRITE | global)
+
+ }
+ va += INTEL_PGBYTES;
+ }
+ }
+ for (; pte < ptable+NPTES; pte++)
+ {
+ if (va >= kernel_virtual_end - MAPWINDOW_SIZE && va < kernel_virtual_end)
+ {
+ pmap_mapwindow_t *win = &mapwindows[atop(va - (kernel_virtual_end - MAPWINDOW_SIZE))];
+ win->entry = pte;
+ win->vaddr = va;
+ }
+ WRITE_PTE(pte, 0);
+ va += INTEL_PGBYTES;
+ }
+#ifdef MACH_PV_PAGETABLES
+ pmap_set_page_readonly_init(ptable);
+ if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (ptable)))
+ panic("couldn't pin page %p(%lx)\n", ptable, (vm_offset_t) kv_to_ma (ptable));
+#endif /* MACH_PV_PAGETABLES */
+ }
+ }
+
+ /* Architecture-specific code will turn on paging
+ soon after we return from here. */
+}
+
+#ifdef MACH_PV_PAGETABLES
+/* These are only required because of Xen security policies */
+
+/* Set back a page read write */
+void pmap_set_page_readwrite(void *_vaddr) {
+ vm_offset_t vaddr = (vm_offset_t) _vaddr;
+ phys_addr_t paddr = kvtophys(vaddr);
+ vm_offset_t canon_vaddr = phystokv(paddr);
+ if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE))
+ panic("couldn't set hiMMU readwrite for addr %lx(%lx)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
+ if (canon_vaddr != vaddr)
+ if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE))
+ panic("couldn't set hiMMU readwrite for paddr %lx(%lx)\n", canon_vaddr, (vm_offset_t) pa_to_ma (paddr));
+}
+
+/* Set a page read only (so as to pin it for instance) */
+void pmap_set_page_readonly(void *_vaddr) {
+ vm_offset_t vaddr = (vm_offset_t) _vaddr;
+ phys_addr_t paddr = kvtophys(vaddr);
+ vm_offset_t canon_vaddr = phystokv(paddr);
+ if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID) {
+ if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE))
+ panic("couldn't set hiMMU readonly for vaddr %lx(%lx)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
+ }
+ if (canon_vaddr != vaddr &&
+ *pmap_pde(kernel_pmap, canon_vaddr) & INTEL_PTE_VALID) {
+ if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE))
+ panic("couldn't set hiMMU readonly for vaddr %lx canon_vaddr %lx paddr %lx (%lx)\n", vaddr, canon_vaddr, paddr, (vm_offset_t) pa_to_ma (paddr));
+ }
+}
+
+/* This needs to be called instead of pmap_set_page_readonly as long as RC3
+ * still points to the bootstrap dirbase, to also fix the bootstrap table. */
+void pmap_set_page_readonly_init(void *_vaddr) {
+ vm_offset_t vaddr = (vm_offset_t) _vaddr;
+#if PAE
+ pt_entry_t *pdpbase = (void*) boot_info.pt_base;
+#ifdef __x86_64__
+ pdpbase = (pt_entry_t *) ptetokv(pdpbase[lin2l4num(vaddr)]);
+#endif
+ /* The bootstrap table does not necessarily use contiguous pages for the pde tables */
+ pt_entry_t *dirbase = (void*) ptetokv(pdpbase[lin2pdpnum(vaddr)]);
+#else
+ pt_entry_t *dirbase = (void*) boot_info.pt_base;
+#endif
+ pt_entry_t *pte = &dirbase[lin2pdenum(vaddr) & PTEMASK];
+ /* Modify our future kernel map (can't use update_va_mapping for this)... */
+ if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID) {
+ if (!hyp_mmu_update_la (kvtolin(vaddr), pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID))
+ panic("couldn't set hiMMU readonly for vaddr %lx(%lx)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+ }
+ /* ... and the bootstrap map. */
+ if (*pte & INTEL_PTE_VALID) {
+ if (hyp_do_update_va_mapping (vaddr, pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID, UVMF_NONE))
+ panic("couldn't set MMU readonly for vaddr %lx(%lx)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+ }
+}
+
+void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
+ unsigned i;
+ pt_entry_t *dir;
+ vm_offset_t va = 0;
+#ifdef __x86_64__
+ int l4i, l3i;
+#else
+#if PAE
+ unsigned j;
+#endif /* PAE */
+#endif
+ if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, kv_to_mfn(base)))
+ panic("pmap_clear_bootstrap_pagetable: couldn't unpin page %p(%lx)\n", base, (vm_offset_t) kv_to_ma(base));
+#ifdef __x86_64__
+ /* 4-level page table */
+ for (l4i = 0; l4i < NPTES && va < HYP_VIRT_START && va < 0x0000800000000000UL; l4i++) {
+ pt_entry_t l4e = base[l4i];
+ pt_entry_t *l3;
+ if (!(l4e & INTEL_PTE_VALID)) {
+ va += NPTES * NPTES * NPTES * INTEL_PGBYTES;
+ continue;
+ }
+ l3 = (pt_entry_t *) ptetokv(l4e);
+
+ for (l3i = 0; l3i < NPTES && va < HYP_VIRT_START; l3i++) {
+ pt_entry_t l3e = l3[l3i];
+ if (!(l3e & INTEL_PTE_VALID)) {
+ va += NPTES * NPTES * INTEL_PGBYTES;
+ continue;
+ }
+ dir = (pt_entry_t *) ptetokv(l3e);
+#else
+#if PAE
+ /* 3-level page table */
+ for (j = 0; j < PDPNUM && va < HYP_VIRT_START; j++)
+ {
+ pt_entry_t pdpe = base[j];
+ if (!(pdpe & INTEL_PTE_VALID)) {
+ va += NPTES * NPTES * INTEL_PGBYTES;
+ continue;
+ }
+ dir = (pt_entry_t *) ptetokv(pdpe);
+#else /* PAE */
+ /* 2-level page table */
+ dir = base;
+#endif /* PAE */
+#endif
+ for (i = 0; i < NPTES && va < HYP_VIRT_START; i++) {
+ pt_entry_t pde = dir[i];
+ unsigned long pfn = atop(pte_to_pa(pde));
+ void *pgt = (void*) phystokv(ptoa(pfn));
+ if (pde & INTEL_PTE_VALID)
+ hyp_free_page(pfn, pgt);
+ va += NPTES * INTEL_PGBYTES;
+ }
+#ifndef __x86_64__
+#if PAE
+ hyp_free_page(atop(_kvtophys(dir)), dir);
+ }
+#endif /* PAE */
+#else
+ hyp_free_page(atop(_kvtophys(dir)), dir);
+ }
+ hyp_free_page(atop(_kvtophys(l3)), l3);
+ }
+#endif
+ hyp_free_page(atop(_kvtophys(base)), base);
+}
+#endif /* MACH_PV_PAGETABLES */
+
+/*
+ * Create a temporary mapping for a given physical entry
+ *
+ * This can be used to access physical pages which are not mapped 1:1 by
+ * phystokv().
+ */
+pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry)
+{
+ pmap_mapwindow_t *map;
+ int cpu = cpu_number();
+
+ assert(entry != 0);
+
+ /* Find an empty one. */
+ for (map = &mapwindows[cpu * PMAP_NMAPWINDOWS]; map < &mapwindows[(cpu+1) * PMAP_NMAPWINDOWS]; map++)
+ if (!(*map->entry))
+ break;
+ assert(map < &mapwindows[(cpu+1) * PMAP_NMAPWINDOWS]);
+
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(map->entry), pa_to_ma(entry)))
+ panic("pmap_get_mapwindow");
+#else /* MACH_PV_PAGETABLES */
+ WRITE_PTE(map->entry, entry);
+#endif /* MACH_PV_PAGETABLES */
+ INVALIDATE_TLB(kernel_pmap, map->vaddr, map->vaddr + PAGE_SIZE);
+ return map;
+}
+
+/*
+ * Destroy a temporary mapping for a physical entry
+ */
+void pmap_put_mapwindow(pmap_mapwindow_t *map)
+{
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(map->entry), 0))
+ panic("pmap_put_mapwindow");
+#else /* MACH_PV_PAGETABLES */
+ WRITE_PTE(map->entry, 0);
+#endif /* MACH_PV_PAGETABLES */
+ INVALIDATE_TLB(kernel_pmap, map->vaddr, map->vaddr + PAGE_SIZE);
+}
+
+void pmap_virtual_space(
+ vm_offset_t *startp,
+ vm_offset_t *endp)
+{
+ *startp = kernel_virtual_start;
+ *endp = kernel_virtual_end - MAPWINDOW_SIZE;
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void pmap_init(void)
+{
+ unsigned long npages;
+ vm_offset_t addr;
+ vm_size_t s;
+#if NCPUS > 1
+ int i;
+#endif /* NCPUS > 1 */
+
+ /*
+ * Allocate memory for the pv_head_table and its lock bits,
+ * the modify bit array, and the pte_page table.
+ */
+
+ npages = vm_page_table_size();
+ s = (vm_size_t) (sizeof(struct pv_entry) * npages
+ + pv_lock_table_size(npages)
+ + npages);
+
+ s = round_page(s);
+ if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
+ panic("pmap_init");
+ memset((void *) addr, 0, s);
+
+ /*
+ * Allocate the structures first to preserve word-alignment.
+ */
+ pv_head_table = (pv_entry_t) addr;
+ addr = (vm_offset_t) (pv_head_table + npages);
+
+ pv_lock_table = (char *) addr;
+ addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages));
+
+ pmap_phys_attributes = (char *) addr;
+
+ /*
+ * Create the cache of physical maps,
+ * and of the physical-to-virtual entries.
+ */
+ s = (vm_size_t) sizeof(struct pmap);
+ kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, 0);
+ kmem_cache_init(&pt_cache, "pmap_L1",
+ INTEL_PGBYTES, INTEL_PGBYTES, NULL,
+ KMEM_CACHE_PHYSMEM);
+ kmem_cache_init(&pd_cache, "pmap_L2",
+ INTEL_PGBYTES, INTEL_PGBYTES, NULL,
+ KMEM_CACHE_PHYSMEM);
+#if PAE
+ kmem_cache_init(&pdpt_cache, "pmap_L3",
+ INTEL_PGBYTES, INTEL_PGBYTES, NULL,
+ KMEM_CACHE_PHYSMEM);
+#ifdef __x86_64__
+ kmem_cache_init(&l4_cache, "pmap_L4",
+ INTEL_PGBYTES, INTEL_PGBYTES, NULL,
+ KMEM_CACHE_PHYSMEM);
+#endif /* __x86_64__ */
+#endif /* PAE */
+ s = (vm_size_t) sizeof(struct pv_entry);
+ kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, 0);
+
+#if NCPUS > 1
+ /*
+ * Set up the pmap request lists
+ */
+ for (i = 0; i < NCPUS; i++) {
+ pmap_update_list_t up = &cpu_update_list[i];
+
+ simple_lock_init(&up->lock);
+ up->count = 0;
+ }
+#endif /* NCPUS > 1 */
+
+ /*
+ * Indicate that the PMAP module is now fully initialized.
+ */
+ pmap_initialized = TRUE;
+}
+
+static inline boolean_t
+valid_page(phys_addr_t addr)
+{
+ struct vm_page *p;
+
+ if (!pmap_initialized)
+ return FALSE;
+
+ p = vm_page_lookup_pa(addr);
+ return (p != NULL);
+}
+
+/*
+ * Routine: pmap_page_table_page_alloc
+ *
+ * Allocates a new physical page to be used as a page-table page.
+ *
+ * Must be called with the pmap system and the pmap unlocked,
+ * since these must be unlocked to use vm_page_grab.
+ */
+static vm_offset_t
+pmap_page_table_page_alloc(void)
+{
+ vm_page_t m;
+ phys_addr_t pa;
+
+ check_simple_locks();
+
+ /*
+ * We cannot allocate the pmap_object in pmap_init,
+ * because it is called before the cache package is up.
+ * Allocate it now if it is missing.
+ */
+ if (pmap_object == VM_OBJECT_NULL)
+ pmap_object = vm_object_allocate(vm_page_table_size() * PAGE_SIZE);
+
+ /*
+ * Allocate a VM page for the level 2 page table entries.
+ */
+ while ((m = vm_page_grab(VM_PAGE_DIRECTMAP)) == VM_PAGE_NULL)
+ VM_PAGE_WAIT((void (*)()) 0);
+
+ /*
+ * Map the page to its physical address so that it
+ * can be found later.
+ */
+ pa = m->phys_addr;
+ assert(pa == (vm_offset_t) pa);
+ vm_object_lock(pmap_object);
+ vm_page_insert(m, pmap_object, pa);
+ vm_page_lock_queues();
+ vm_page_wire(m);
+ inuse_ptepages_count++;
+ vm_page_unlock_queues();
+ vm_object_unlock(pmap_object);
+
+ /*
+ * Zero the page.
+ */
+ memset((void *)phystokv(pa), 0, PAGE_SIZE);
+
+ return pa;
+}
+
+#ifdef MACH_XEN
+void pmap_map_mfn(void *_addr, unsigned long mfn) {
+ vm_offset_t addr = (vm_offset_t) _addr;
+ pt_entry_t *pte, *pdp;
+ vm_offset_t ptp;
+ pt_entry_t ma = ((pt_entry_t) mfn) << PAGE_SHIFT;
+
+ /* Add a ptp if none exist yet for this pte */
+ if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL) {
+ ptp = phystokv(pmap_page_table_page_alloc());
+#ifdef MACH_PV_PAGETABLES
+ pmap_set_page_readonly((void*) ptp);
+ if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, pa_to_mfn(ptp)))
+ panic("couldn't pin page %lx(%lx)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
+#endif /* MACH_PV_PAGETABLES */
+ pdp = pmap_pde(kernel_pmap, addr);
+
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(pdp),
+ pa_to_pte(kv_to_ma(ptp)) | INTEL_PTE_VALID
+ | INTEL_PTE_USER
+ | INTEL_PTE_WRITE))
+ panic("%s:%d could not set pde %llx(%lx) to %lx(%lx)\n",__FILE__,__LINE__,kvtophys((vm_offset_t)pdp),(vm_offset_t) kv_to_ma(pdp), ptp, (vm_offset_t) pa_to_ma(ptp));
+#else /* MACH_PV_PAGETABLES */
+ *pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
+ | INTEL_PTE_USER
+ | INTEL_PTE_WRITE;
+#endif /* MACH_PV_PAGETABLES */
+ pte = pmap_pte(kernel_pmap, addr);
+ }
+
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(pte), ma | INTEL_PTE_VALID | INTEL_PTE_WRITE))
+ panic("%s:%d could not set pte %p(%lx) to %llx(%llx)\n",__FILE__,__LINE__,pte,(vm_offset_t) kv_to_ma(pte), ma, ma_to_pa(ma));
+#else /* MACH_PV_PAGETABLES */
+ /* Note: in this case, mfn is actually a pfn. */
+ WRITE_PTE(pte, ma | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+#endif /* MACH_PV_PAGETABLES */
+}
+#endif /* MACH_XEN */
+
+/*
+ * Deallocate a page-table page.
+ * The page-table page must have all mappings removed,
+ * and be removed from its page directory.
+ */
+static void
+pmap_page_table_page_dealloc(vm_offset_t pa)
+{
+ vm_page_t m;
+
+ vm_object_lock(pmap_object);
+ m = vm_page_lookup(pmap_object, pa);
+ vm_page_lock_queues();
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, pa_to_mfn(pa)))
+ panic("couldn't unpin page %llx(%lx)\n", pa, (vm_offset_t) kv_to_ma(pa));
+ pmap_set_page_readwrite((void*) phystokv(pa));
+#endif /* MACH_PV_PAGETABLES */
+ vm_page_free(m);
+ inuse_ptepages_count--;
+ vm_page_unlock_queues();
+ vm_object_unlock(pmap_object);
+}
+
+/*
+ * Create and return a physical map.
+ *
+ * If the size specified for the map
+ * is zero, the map is an actual physical
+ * map, and may be referenced by the
+ * hardware.
+ *
+ * If the size specified is non-zero,
+ * the map will be used in software only, and
+ * is bounded by that size.
+ */
+pmap_t pmap_create(vm_size_t size)
+{
+#ifdef __x86_64__
+ // needs to be reworked if we want to dynamically allocate PDPs for kernel
+ const int PDPNUM = PDPNUM_KERNEL;
+#endif
+ pt_entry_t *page_dir[PDPNUM];
+ int i;
+ pmap_t p;
+ pmap_statistics_t stats;
+
+ /*
+ * A software use-only map doesn't even need a map.
+ */
+
+ if (size != 0) {
+ return(PMAP_NULL);
+ }
+
+/*
+ * Allocate a pmap struct from the pmap_cache. Then allocate
+ * the page descriptor table.
+ */
+
+ p = (pmap_t) kmem_cache_alloc(&pmap_cache);
+ if (p == PMAP_NULL)
+ return PMAP_NULL;
+
+ for (i = 0; i < PDPNUM; i++) {
+ page_dir[i] = (pt_entry_t *) kmem_cache_alloc(&pd_cache);
+ if (page_dir[i] == NULL) {
+ i -= 1;
+ while (i >= 0) {
+ kmem_cache_free(&pd_cache,
+ (vm_address_t) page_dir[i]);
+ i -= 1;
+ }
+ kmem_cache_free(&pmap_cache, (vm_address_t) p);
+ return PMAP_NULL;
+ }
+ memcpy(page_dir[i],
+ (void *) kernel_page_dir + i * INTEL_PGBYTES,
+ INTEL_PGBYTES);
+ }
+
+#ifdef LINUX_DEV
+#if VM_MIN_KERNEL_ADDRESS != 0
+ /* Do not map BIOS in user tasks */
+ page_dir
+#if PAE
+ [lin2pdpnum(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)]
+#else
+ [0]
+#endif
+ [lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)]
+ = 0;
+#endif
+#endif /* LINUX_DEV */
+
+#ifdef MACH_PV_PAGETABLES
+ {
+ for (i = 0; i < PDPNUM; i++)
+ pmap_set_page_readonly((void *) page_dir[i]);
+ }
+#endif /* MACH_PV_PAGETABLES */
+
+#if PAE
+ pt_entry_t *pdp_kernel = (pt_entry_t *) kmem_cache_alloc(&pdpt_cache);
+ if (pdp_kernel == NULL) {
+ for (i = 0; i < PDPNUM; i++)
+ kmem_cache_free(&pd_cache, (vm_address_t) page_dir[i]);
+ kmem_cache_free(&pmap_cache, (vm_address_t) p);
+ return PMAP_NULL;
+ }
+
+ memset(pdp_kernel, 0, INTEL_PGBYTES);
+ {
+ for (i = 0; i < PDPNUM; i++) {
+ int pdp_index = i;
+#ifdef __x86_64__
+ pdp_index += lin2pdpnum(VM_MIN_KERNEL_ADDRESS);
+#endif
+ WRITE_PTE(&pdp_kernel[pdp_index],
+ pa_to_pte(kvtophys((vm_offset_t) page_dir[i]))
+ | INTEL_PTE_VALID
+#if (defined(__x86_64__) && !defined(MACH_HYP)) || defined(MACH_PV_PAGETABLES)
+ | INTEL_PTE_WRITE
+#ifdef __x86_64__
+ | INTEL_PTE_USER
+#endif /* __x86_64__ */
+#endif
+ );
+ }
+ }
+#ifdef __x86_64__
+ p->l4base = (pt_entry_t *) kmem_cache_alloc(&l4_cache);
+ if (p->l4base == NULL)
+ panic("pmap_create");
+ memset(p->l4base, 0, INTEL_PGBYTES);
+ WRITE_PTE(&p->l4base[lin2l4num(VM_MIN_KERNEL_ADDRESS)],
+ pa_to_pte(kvtophys((vm_offset_t) pdp_kernel)) | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+#ifdef MACH_PV_PAGETABLES
+ // FIXME: use kmem_cache_alloc instead
+ if (kmem_alloc_wired(kernel_map,
+ (vm_offset_t *)&p->user_pdpbase, INTEL_PGBYTES)
+ != KERN_SUCCESS)
+ panic("pmap_create");
+ memset(p->user_pdpbase, 0, INTEL_PGBYTES);
+ {
+ int i;
+ for (i = 0; i < lin2pdpnum(VM_MAX_USER_ADDRESS); i++)
+ WRITE_PTE(&p->user_pdpbase[i], pa_to_pte(kvtophys((vm_offset_t) page_dir[i])) | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+ }
+ // FIXME: use kmem_cache_alloc instead
+ if (kmem_alloc_wired(kernel_map,
+ (vm_offset_t *)&p->user_l4base, INTEL_PGBYTES)
+ != KERN_SUCCESS)
+ panic("pmap_create");
+ memset(p->user_l4base, 0, INTEL_PGBYTES);
+ WRITE_PTE(&p->user_l4base[0], pa_to_pte(kvtophys((vm_offset_t) p->user_pdpbase)) | INTEL_PTE_VALID | INTEL_PTE_WRITE);
+#endif /* MACH_PV_PAGETABLES */
+#else /* _x86_64 */
+ p->pdpbase = pdp_kernel;
+#endif /* _x86_64 */
+#ifdef MACH_PV_PAGETABLES
+#ifdef __x86_64__
+ pmap_set_page_readonly(p->l4base);
+ pmap_set_page_readonly(p->user_l4base);
+ pmap_set_page_readonly(p->user_pdpbase);
+#else
+ pmap_set_page_readonly(p->pdpbase);
+#endif
+#endif /* MACH_PV_PAGETABLES */
+#else /* PAE */
+ p->dirbase = page_dir[0];
+#endif /* PAE */
+
+ p->ref_count = 1;
+
+ simple_lock_init(&p->lock);
+ p->cpus_using = 0;
+
+ /*
+ * Initialize statistics.
+ */
+
+ stats = &p->stats;
+ stats->resident_count = 0;
+ stats->wired_count = 0;
+
+ return(p);
+}
+
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains
+ * no valid mappings.
+ */
+
+void pmap_destroy(pmap_t p)
+{
+ int c, s;
+
+ if (p == PMAP_NULL)
+ return;
+
+ SPLVM(s);
+ simple_lock(&p->lock);
+ c = --p->ref_count;
+ simple_unlock(&p->lock);
+ SPLX(s);
+
+ if (c != 0) {
+ return; /* still in use */
+ }
+
+ /*
+ * Free the page table tree.
+ */
+#if PAE
+#ifdef __x86_64__
+ for (int l4i = 0; l4i < NPTES; l4i++) {
+ pt_entry_t pdp = (pt_entry_t) p->l4base[l4i];
+ if (!(pdp & INTEL_PTE_VALID))
+ continue;
+ pt_entry_t *pdpbase = (pt_entry_t*) ptetokv(pdp);
+#else /* __x86_64__ */
+ pt_entry_t *pdpbase = p->pdpbase;
+#endif /* __x86_64__ */
+ for (int l3i = 0; l3i < NPTES; l3i++) {
+ pt_entry_t pde = (pt_entry_t) pdpbase[l3i];
+ if (!(pde & INTEL_PTE_VALID))
+ continue;
+ pt_entry_t *pdebase = (pt_entry_t*) ptetokv(pde);
+ if (
+#ifdef __x86_64__
+ l4i < lin2l4num(VM_MAX_USER_ADDRESS) ||
+ (l4i == lin2l4num(VM_MAX_USER_ADDRESS) && l3i < lin2pdpnum(VM_MAX_USER_ADDRESS))
+#else /* __x86_64__ */
+ l3i < lin2pdpnum(VM_MAX_USER_ADDRESS)
+#endif /* __x86_64__ */
+ )
+ for (int l2i = 0; l2i < NPTES; l2i++)
+#else /* PAE */
+ pt_entry_t *pdebase = p->dirbase;
+ for (int l2i = 0; l2i < lin2pdenum(VM_MAX_USER_ADDRESS); l2i++)
+#endif /* PAE */
+ {
+ pt_entry_t pte = (pt_entry_t) pdebase[l2i];
+ if (!(pte & INTEL_PTE_VALID))
+ continue;
+ kmem_cache_free(&pt_cache, (vm_offset_t)ptetokv(pte));
+ }
+ kmem_cache_free(&pd_cache, (vm_offset_t)pdebase);
+#if PAE
+ }
+ kmem_cache_free(&pdpt_cache, (vm_offset_t)pdpbase);
+#ifdef __x86_64__
+ }
+ kmem_cache_free(&l4_cache, (vm_offset_t) p->l4base);
+#endif /* __x86_64__ */
+#endif /* PAE */
+
+ /* Finally, free the pmap itself */
+ kmem_cache_free(&pmap_cache, (vm_offset_t) p);
+}
+
+/*
+ * Add a reference to the specified pmap.
+ */
+
+void pmap_reference(pmap_t p)
+{
+ int s;
+ if (p != PMAP_NULL) {
+ SPLVM(s);
+ simple_lock(&p->lock);
+ p->ref_count++;
+ simple_unlock(&p->lock);
+ SPLX(s);
+ }
+}
+
+/*
+ * Remove a range of hardware page-table entries.
+ * The entries given are the first (inclusive)
+ * and last (exclusive) entries for the VM pages.
+ * The virtual address is the va for the first pte.
+ *
+ * The pmap must be locked.
+ * If the pmap is not the kernel pmap, the range must lie
+ * entirely within one pte-page. This is NOT checked.
+ * Assumes that the pte-page exists.
+ */
+
+static
+void pmap_remove_range(
+ pmap_t pmap,
+ vm_offset_t va,
+ pt_entry_t *spte,
+ pt_entry_t *epte)
+{
+ pt_entry_t *cpte;
+ unsigned long num_removed, num_unwired;
+ unsigned long pai;
+ phys_addr_t pa;
+#ifdef MACH_PV_PAGETABLES
+ int n, ii = 0;
+ struct mmu_update update[HYP_BATCH_MMU_UPDATES];
+#endif /* MACH_PV_PAGETABLES */
+
+ if (pmap == kernel_pmap && (va < kernel_virtual_start || va + (epte-spte)*PAGE_SIZE > kernel_virtual_end))
+ panic("pmap_remove_range(%lx-%lx) falls in physical memory area!\n", (unsigned long) va, (unsigned long) va + (epte-spte)*PAGE_SIZE);
+
+#if DEBUG_PTE_PAGE
+ if (pmap != kernel_pmap)
+ ptep_check(get_pte_page(spte));
+#endif /* DEBUG_PTE_PAGE */
+ num_removed = 0;
+ num_unwired = 0;
+
+ for (cpte = spte; cpte < epte;
+ cpte += ptes_per_vm_page, va += PAGE_SIZE) {
+
+ if (*cpte == 0)
+ continue;
+
+ assert(*cpte & INTEL_PTE_VALID);
+
+ pa = pte_to_pa(*cpte);
+
+ num_removed++;
+ if (*cpte & INTEL_PTE_WIRED)
+ num_unwired++;
+
+ if (!valid_page(pa)) {
+
+ /*
+ * Outside range of managed physical memory.
+ * Just remove the mappings.
+ */
+ int i = ptes_per_vm_page;
+ pt_entry_t *lpte = cpte;
+ do {
+#ifdef MACH_PV_PAGETABLES
+ update[ii].ptr = kv_to_ma(lpte);
+ update[ii].val = 0;
+ ii++;
+ if (ii == HYP_BATCH_MMU_UPDATES) {
+ hyp_mmu_update(kvtolin(&update), ii, kvtolin(&n), DOMID_SELF);
+ if (n != ii)
+ panic("couldn't pmap_remove_range\n");
+ ii = 0;
+ }
+#else /* MACH_PV_PAGETABLES */
+ *lpte = 0;
+#endif /* MACH_PV_PAGETABLES */
+ lpte++;
+ } while (--i > 0);
+ continue;
+ }
+
+ pai = pa_index(pa);
+ LOCK_PVH(pai);
+
+ /*
+ * Get the modify and reference bits.
+ */
+ {
+ int i;
+ pt_entry_t *lpte;
+
+ i = ptes_per_vm_page;
+ lpte = cpte;
+ do {
+ pmap_phys_attributes[pai] |=
+ *lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
+#ifdef MACH_PV_PAGETABLES
+ update[ii].ptr = kv_to_ma(lpte);
+ update[ii].val = 0;
+ ii++;
+ if (ii == HYP_BATCH_MMU_UPDATES) {
+ hyp_mmu_update(kvtolin(&update), ii, kvtolin(&n), DOMID_SELF);
+ if (n != ii)
+ panic("couldn't pmap_remove_range\n");
+ ii = 0;
+ }
+#else /* MACH_PV_PAGETABLES */
+ *lpte = 0;
+#endif /* MACH_PV_PAGETABLES */
+ lpte++;
+ } while (--i > 0);
+ }
+
+ /*
+ * Remove the mapping from the pvlist for
+ * this physical page.
+ */
+ {
+ pv_entry_t pv_h, prev, cur;
+
+ pv_h = pai_to_pvh(pai);
+ if (pv_h->pmap == PMAP_NULL) {
+ panic("pmap_remove: null pv_list for pai %lx at va %lx!", pai, (unsigned long) va);
+ }
+ if (pv_h->va == va && pv_h->pmap == pmap) {
+ /*
+ * Header is the pv_entry. Copy the next one
+ * to header and free the next one (we cannot
+ * free the header)
+ */
+ cur = pv_h->next;
+ if (cur != PV_ENTRY_NULL) {
+ *pv_h = *cur;
+ PV_FREE(cur);
+ }
+ else {
+ pv_h->pmap = PMAP_NULL;
+ }
+ }
+ else {
+ cur = pv_h;
+ do {
+ prev = cur;
+ if ((cur = prev->next) == PV_ENTRY_NULL) {
+ panic("pmap-remove: mapping not in pv_list!");
+ }
+ } while (cur->va != va || cur->pmap != pmap);
+ prev->next = cur->next;
+ PV_FREE(cur);
+ }
+ UNLOCK_PVH(pai);
+ }
+ }
+
+#ifdef MACH_PV_PAGETABLES
+ if (ii > HYP_BATCH_MMU_UPDATES)
+ panic("overflowed array in pmap_remove_range");
+ hyp_mmu_update(kvtolin(&update), ii, kvtolin(&n), DOMID_SELF);
+ if (n != ii)
+ panic("couldn't pmap_remove_range\n");
+#endif /* MACH_PV_PAGETABLES */
+
+ /*
+ * Update the counts
+ */
+ pmap->stats.resident_count -= num_removed;
+ pmap->stats.wired_count -= num_unwired;
+}
+
+/*
+ * Remove the given range of addresses
+ * from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the hardware page size.
+ */
+
+void pmap_remove(
+ pmap_t map,
+ vm_offset_t s,
+ vm_offset_t e)
+{
+ int spl;
+ pt_entry_t *spte, *epte;
+ vm_offset_t l;
+ vm_offset_t _s = s;
+
+ if (map == PMAP_NULL)
+ return;
+
+ PMAP_READ_LOCK(map, spl);
+
+ while (s < e) {
+ pt_entry_t *pde = pmap_pde(map, s);
+
+ l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
+ if (l > e || l < s)
+ l = e;
+ if (pde && (*pde & INTEL_PTE_VALID)) {
+ spte = (pt_entry_t *)ptetokv(*pde);
+ spte = &spte[ptenum(s)];
+ epte = &spte[intel_btop(l-s)];
+ pmap_remove_range(map, s, spte, epte);
+ }
+ s = l;
+ }
+ PMAP_UPDATE_TLBS(map, _s, e);
+
+ PMAP_READ_UNLOCK(map, spl);
+}
+
+/*
+ * Routine: pmap_page_protect
+ *
+ * Function:
+ * Lower the permission for all mappings to a given
+ * page.
+ */
+void pmap_page_protect(
+ phys_addr_t phys,
+ vm_prot_t prot)
+{
+ pv_entry_t pv_h, prev;
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
+ unsigned long pai;
+ pmap_t pmap;
+ int spl;
+ boolean_t remove;
+
+ assert(phys != vm_page_fictitious_addr);
+ if (!valid_page(phys)) {
+ /*
+ * Not a managed page.
+ */
+ return;
+ }
+
+ /*
+ * Determine the new protection.
+ */
+ switch (prot) {
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ remove = FALSE;
+ break;
+ case VM_PROT_ALL:
+ return; /* nothing to do */
+ default:
+ remove = TRUE;
+ break;
+ }
+
+ /*
+ * Lock the pmap system first, since we will be changing
+ * several pmaps.
+ */
+
+ PMAP_WRITE_LOCK(spl);
+
+ pai = pa_index(phys);
+ pv_h = pai_to_pvh(pai);
+
+ /*
+ * Walk down PV list, changing or removing all mappings.
+ * We do not have to lock the pv_list because we have
+ * the entire pmap system locked.
+ */
+ if (pv_h->pmap != PMAP_NULL) {
+
+ prev = pv_e = pv_h;
+ do {
+ vm_offset_t va;
+
+ pmap = pv_e->pmap;
+ /*
+ * Lock the pmap to block pmap_extract and similar routines.
+ */
+ simple_lock(&pmap->lock);
+
+ va = pv_e->va;
+ pte = pmap_pte(pmap, va);
+
+ /*
+ * Consistency checks.
+ */
+ assert(*pte & INTEL_PTE_VALID);
+ assert(pte_to_pa(*pte) == phys);
+
+ /*
+ * Remove the mapping if new protection is NONE
+ * or if write-protecting a kernel mapping.
+ */
+ if (remove || pmap == kernel_pmap) {
+ /*
+ * Remove the mapping, collecting any modify bits.
+ */
+
+ if (*pte & INTEL_PTE_WIRED) {
+ pmap->stats.wired_count--;
+ }
+
+ {
+ int i = ptes_per_vm_page;
+
+ do {
+ pmap_phys_attributes[pai] |=
+ *pte & (PHYS_MODIFIED|PHYS_REFERENCED);
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(pte++), 0))
+ panic("%s:%d could not clear pte %p\n",__FILE__,__LINE__,pte-1);
+#else /* MACH_PV_PAGETABLES */
+ *pte++ = 0;
+#endif /* MACH_PV_PAGETABLES */
+ } while (--i > 0);
+ }
+
+ pmap->stats.resident_count--;
+
+ /*
+ * Remove the pv_entry.
+ */
+ if (pv_e == pv_h) {
+ /*
+ * Fix up head later.
+ */
+ pv_h->pmap = PMAP_NULL;
+ }
+ else {
+ /*
+ * Delete this entry.
+ */
+ prev->next = pv_e->next;
+ PV_FREE(pv_e);
+ }
+ }
+ else {
+ /*
+ * Write-protect.
+ */
+ int i = ptes_per_vm_page;
+
+ do {
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~INTEL_PTE_WRITE))
+ panic("%s:%d could not disable write on pte %p\n",__FILE__,__LINE__,pte);
+#else /* MACH_PV_PAGETABLES */
+ *pte &= ~INTEL_PTE_WRITE;
+#endif /* MACH_PV_PAGETABLES */
+ pte++;
+ } while (--i > 0);
+
+ /*
+ * Advance prev.
+ */
+ prev = pv_e;
+ }
+ PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+
+ simple_unlock(&pmap->lock);
+
+ } while ((pv_e = prev->next) != PV_ENTRY_NULL);
+
+ /*
+ * If pv_head mapping was removed, fix it up.
+ */
+ if (pv_h->pmap == PMAP_NULL) {
+ pv_e = pv_h->next;
+ if (pv_e != PV_ENTRY_NULL) {
+ *pv_h = *pv_e;
+ PV_FREE(pv_e);
+ }
+ }
+ }
+
+ PMAP_WRITE_UNLOCK(spl);
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ * Will not increase permissions.
+ */
+void pmap_protect(
+ pmap_t map,
+ vm_offset_t s,
+ vm_offset_t e,
+ vm_prot_t prot)
+{
+ pt_entry_t *spte, *epte;
+ vm_offset_t l;
+ int spl;
+ vm_offset_t _s = s;
+
+ if (map == PMAP_NULL)
+ return;
+
+ /*
+ * Determine the new protection.
+ */
+ switch (prot) {
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ break;
+ case VM_PROT_READ|VM_PROT_WRITE:
+ case VM_PROT_ALL:
+ return; /* nothing to do */
+ default:
+ pmap_remove(map, s, e);
+ return;
+ }
+
+#if !(__i486__ || __i586__ || __i686__)
+ /*
+ * If write-protecting in the kernel pmap,
+ * remove the mappings; the i386 ignores
+ * the write-permission bit in kernel mode.
+ */
+ if (map == kernel_pmap) {
+ pmap_remove(map, s, e);
+ return;
+ }
+#endif
+
+ SPLVM(spl);
+ simple_lock(&map->lock);
+
+ while (s < e) {
+ pt_entry_t *pde = pde = pmap_pde(map, s);
+
+ l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1);
+ if (l > e || l < s)
+ l = e;
+ if (pde && (*pde & INTEL_PTE_VALID)) {
+ spte = (pt_entry_t *)ptetokv(*pde);
+ spte = &spte[ptenum(s)];
+ epte = &spte[intel_btop(l-s)];
+
+#ifdef MACH_PV_PAGETABLES
+ int n, i = 0;
+ struct mmu_update update[HYP_BATCH_MMU_UPDATES];
+#endif /* MACH_PV_PAGETABLES */
+
+ while (spte < epte) {
+ if (*spte & INTEL_PTE_VALID) {
+#ifdef MACH_PV_PAGETABLES
+ update[i].ptr = kv_to_ma(spte);
+ update[i].val = *spte & ~INTEL_PTE_WRITE;
+ i++;
+ if (i == HYP_BATCH_MMU_UPDATES) {
+ hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
+ if (n != i)
+ panic("couldn't pmap_protect\n");
+ i = 0;
+ }
+#else /* MACH_PV_PAGETABLES */
+ *spte &= ~INTEL_PTE_WRITE;
+#endif /* MACH_PV_PAGETABLES */
+ }
+ spte++;
+ }
+#ifdef MACH_PV_PAGETABLES
+ if (i > HYP_BATCH_MMU_UPDATES)
+ panic("overflowed array in pmap_protect");
+ hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
+ if (n != i)
+ panic("couldn't pmap_protect\n");
+#endif /* MACH_PV_PAGETABLES */
+ }
+ s = l;
+ }
+ PMAP_UPDATE_TLBS(map, _s, e);
+
+ simple_unlock(&map->lock);
+ SPLX(spl);
+}
+
+typedef pt_entry_t* (*pmap_level_getter_t)(const pmap_t pmap, vm_offset_t addr);
+/*
+* Expand one single level of the page table tree
+*/
+static inline pt_entry_t* pmap_expand_level(pmap_t pmap, vm_offset_t v, int spl,
+ pmap_level_getter_t pmap_level,
+ pmap_level_getter_t pmap_level_upper,
+ int n_per_vm_page,
+ struct kmem_cache *cache)
+{
+ pt_entry_t *pte;
+
+ /*
+ * Expand pmap to include this pte. Assume that
+ * pmap is always expanded to include enough hardware
+ * pages to map one VM page.
+ */
+ while ((pte = pmap_level(pmap, v)) == PT_ENTRY_NULL) {
+ /*
+ * Need to allocate a new page-table page.
+ */
+ vm_offset_t ptp;
+ pt_entry_t *pdp;
+ int i;
+
+ if (pmap == kernel_pmap) {
+ /*
+ * Would have to enter the new page-table page in
+ * EVERY pmap.
+ */
+ panic("pmap_expand kernel pmap to %#zx", v);
+ }
+
+ /*
+ * Unlock the pmap and allocate a new page-table page.
+ */
+ PMAP_READ_UNLOCK(pmap, spl);
+
+ while (!(ptp = kmem_cache_alloc(cache)))
+ VM_PAGE_WAIT((void (*)()) 0);
+ memset((void *)ptp, 0, PAGE_SIZE);
+
+ /*
+ * Re-lock the pmap and check that another thread has
+ * not already allocated the page-table page. If it
+ * has, discard the new page-table page (and try
+ * again to make sure).
+ */
+ PMAP_READ_LOCK(pmap, spl);
+
+ if (pmap_level(pmap, v) != PT_ENTRY_NULL) {
+ /*
+ * Oops...
+ */
+ PMAP_READ_UNLOCK(pmap, spl);
+ kmem_cache_free(cache, ptp);
+ PMAP_READ_LOCK(pmap, spl);
+ continue;
+ }
+
+ /*
+ * Enter the new page table page in the page directory.
+ */
+ i = n_per_vm_page;
+ pdp = pmap_level_upper(pmap, v);
+ do {
+#ifdef MACH_PV_PAGETABLES
+ pmap_set_page_readonly((void *) ptp);
+ if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn(ptp)))
+ panic("couldn't pin page %lx(%lx)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
+ if (!hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdp)),
+ pa_to_pte(pa_to_ma(kvtophys(ptp))) | INTEL_PTE_VALID
+ | INTEL_PTE_USER
+ | INTEL_PTE_WRITE))
+ panic("%s:%d could not set pde %p(%llx,%lx) to %lx(%llx,%lx) %lx\n",__FILE__,__LINE__, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp)), ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp)), (vm_offset_t) pa_to_pte(kv_to_ma(ptp)));
+#else /* MACH_PV_PAGETABLES */
+ *pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
+ | INTEL_PTE_USER
+ | INTEL_PTE_WRITE;
+#endif /* MACH_PV_PAGETABLES */
+ pdp++; /* Note: This is safe b/c we stay in one page. */
+ ptp += INTEL_PGBYTES;
+ } while (--i > 0);
+
+ /*
+ * Now, get the address of the page-table entry.
+ */
+ continue;
+ }
+ return pte;
+}
+
+/*
+ * Expand, if required, the PMAP to include the virtual address V.
+ * PMAP needs to be locked, and it will be still locked on return. It
+ * can temporarily unlock the PMAP, during allocation or deallocation
+ * of physical pages.
+ */
+static inline pt_entry_t* pmap_expand(pmap_t pmap, vm_offset_t v, int spl)
+{
+#ifdef PAE
+#ifdef __x86_64__
+ pmap_expand_level(pmap, v, spl, pmap_ptp, pmap_l4base, 1, &pdpt_cache);
+#endif /* __x86_64__ */
+ pmap_expand_level(pmap, v, spl, pmap_pde, pmap_ptp, 1, &pd_cache);
+#endif /* PAE */
+ return pmap_expand_level(pmap, v, spl, pmap_pte, pmap_pde, ptes_per_vm_page, &pt_cache);
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+void pmap_enter(
+ pmap_t pmap,
+ vm_offset_t v,
+ phys_addr_t pa,
+ vm_prot_t prot,
+ boolean_t wired)
+{
+ boolean_t is_physmem;
+ pt_entry_t *pte;
+ pv_entry_t pv_h;
+ unsigned long i, pai;
+ pv_entry_t pv_e;
+ pt_entry_t template;
+ int spl;
+ phys_addr_t old_pa;
+
+ assert(pa != vm_page_fictitious_addr);
+ if (pmap_debug) printf("pmap(%zx, %llx)\n", v, (unsigned long long) pa);
+ if (pmap == PMAP_NULL)
+ return;
+
+ if (pmap == kernel_pmap && (v < kernel_virtual_start || v >= kernel_virtual_end))
+ panic("pmap_enter(%lx, %llx) falls in physical memory area!\n", (unsigned long) v, (unsigned long long) pa);
+#if !(__i486__ || __i586__ || __i686__)
+ if (pmap == kernel_pmap && (prot & VM_PROT_WRITE) == 0
+ && !wired /* hack for io_wire */ ) {
+ /*
+ * Because the 386 ignores write protection in kernel mode,
+ * we cannot enter a read-only kernel mapping, and must
+ * remove an existing mapping if changing it.
+ */
+ PMAP_READ_LOCK(pmap, spl);
+
+ pte = pmap_pte(pmap, v);
+ if (pte != PT_ENTRY_NULL && *pte != 0) {
+ /*
+ * Invalidate the translation buffer,
+ * then remove the mapping.
+ */
+ pmap_remove_range(pmap, v, pte,
+ pte + ptes_per_vm_page);
+ PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+ }
+ PMAP_READ_UNLOCK(pmap, spl);
+ return;
+ }
+#endif
+
+ /*
+ * Must allocate a new pvlist entry while we're unlocked;
+ * Allocating may cause pageout (which will lock the pmap system).
+ * If we determine we need a pvlist entry, we will unlock
+ * and allocate one. Then we will retry, throughing away
+ * the allocated entry later (if we no longer need it).
+ */
+ pv_e = PV_ENTRY_NULL;
+Retry:
+ PMAP_READ_LOCK(pmap, spl);
+
+ pte = pmap_expand(pmap, v, spl);
+
+ if (vm_page_ready())
+ is_physmem = (vm_page_lookup_pa(pa) != NULL);
+ else
+ is_physmem = (pa < biosmem_directmap_end());
+
+ /*
+ * Special case if the physical page is already mapped
+ * at this address.
+ */
+ old_pa = pte_to_pa(*pte);
+ if (*pte && old_pa == pa) {
+ /*
+ * May be changing its wired attribute or protection
+ */
+
+ if (wired && !(*pte & INTEL_PTE_WIRED))
+ pmap->stats.wired_count++;
+ else if (!wired && (*pte & INTEL_PTE_WIRED))
+ pmap->stats.wired_count--;
+
+ template = pa_to_pte(pa) | INTEL_PTE_VALID;
+ if (pmap != kernel_pmap)
+ template |= INTEL_PTE_USER;
+ if (prot & VM_PROT_WRITE)
+ template |= INTEL_PTE_WRITE;
+ if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486
+ && !is_physmem)
+ template |= INTEL_PTE_NCACHE|INTEL_PTE_WTHRU;
+ if (wired)
+ template |= INTEL_PTE_WIRED;
+ i = ptes_per_vm_page;
+ do {
+ if (*pte & INTEL_PTE_MOD)
+ template |= INTEL_PTE_MOD;
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template)))
+ panic("%s:%d could not set pte %p to %llx\n",__FILE__,__LINE__,pte,template);
+#else /* MACH_PV_PAGETABLES */
+ WRITE_PTE(pte, template)
+#endif /* MACH_PV_PAGETABLES */
+ pte++;
+ pte_increment_pa(template);
+ } while (--i > 0);
+ PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+ }
+ else {
+
+ /*
+ * Remove old mapping from the PV list if necessary.
+ */
+ if (*pte) {
+ /*
+ * Don't free the pte page if removing last
+ * mapping - we will immediately replace it.
+ */
+ pmap_remove_range(pmap, v, pte,
+ pte + ptes_per_vm_page);
+ PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+ }
+
+ if (valid_page(pa)) {
+
+ /*
+ * Enter the mapping in the PV list for this
+ * physical page.
+ */
+
+ pai = pa_index(pa);
+ LOCK_PVH(pai);
+ pv_h = pai_to_pvh(pai);
+
+ if (pv_h->pmap == PMAP_NULL) {
+ /*
+ * No mappings yet
+ */
+ pv_h->va = v;
+ pv_h->pmap = pmap;
+ pv_h->next = PV_ENTRY_NULL;
+ }
+ else {
+#if DEBUG
+ {
+ /* check that this mapping is not already there */
+ pv_entry_t e = pv_h;
+ while (e != PV_ENTRY_NULL) {
+ if (e->pmap == pmap && e->va == v)
+ panic("pmap_enter: already in pv_list");
+ e = e->next;
+ }
+ }
+#endif /* DEBUG */
+
+ /*
+ * Add new pv_entry after header.
+ */
+ if (pv_e == PV_ENTRY_NULL) {
+ PV_ALLOC(pv_e);
+ if (pv_e == PV_ENTRY_NULL) {
+ UNLOCK_PVH(pai);
+ PMAP_READ_UNLOCK(pmap, spl);
+
+ /*
+ * Refill from cache.
+ */
+ pv_e = (pv_entry_t) kmem_cache_alloc(&pv_list_cache);
+ goto Retry;
+ }
+ }
+ pv_e->va = v;
+ pv_e->pmap = pmap;
+ pv_e->next = pv_h->next;
+ pv_h->next = pv_e;
+ /*
+ * Remember that we used the pvlist entry.
+ */
+ pv_e = PV_ENTRY_NULL;
+ }
+ UNLOCK_PVH(pai);
+ }
+
+ /*
+ * And count the mapping.
+ */
+
+ pmap->stats.resident_count++;
+ if (wired)
+ pmap->stats.wired_count++;
+
+ /*
+ * Build a template to speed up entering -
+ * only the pfn changes.
+ */
+ template = pa_to_pte(pa) | INTEL_PTE_VALID;
+ if (pmap != kernel_pmap)
+ template |= INTEL_PTE_USER;
+ if (prot & VM_PROT_WRITE)
+ template |= INTEL_PTE_WRITE;
+ if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486
+ && !is_physmem)
+ template |= INTEL_PTE_NCACHE|INTEL_PTE_WTHRU;
+ if (wired)
+ template |= INTEL_PTE_WIRED;
+ i = ptes_per_vm_page;
+ do {
+#ifdef MACH_PV_PAGETABLES
+ if (!(hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template))))
+ panic("%s:%d could not set pte %p to %llx\n",__FILE__,__LINE__,pte,template);
+#else /* MACH_PV_PAGETABLES */
+ WRITE_PTE(pte, template)
+#endif /* MACH_PV_PAGETABLES */
+ pte++;
+ pte_increment_pa(template);
+ } while (--i > 0);
+ }
+
+ if (pv_e != PV_ENTRY_NULL) {
+ PV_FREE(pv_e);
+ }
+
+ PMAP_READ_UNLOCK(pmap, spl);
+}
+
+/*
+ * Routine: pmap_change_wiring
+ * Function: Change the wiring attribute for a map/virtual-address
+ * pair.
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void pmap_change_wiring(
+ pmap_t map,
+ vm_offset_t v,
+ boolean_t wired)
+{
+ pt_entry_t *pte;
+ int i;
+ int spl;
+
+ /*
+ * We must grab the pmap system lock because we may
+ * change a pte_page queue.
+ */
+ PMAP_READ_LOCK(map, spl);
+
+ if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
+ panic("pmap_change_wiring: pte missing");
+
+ if (wired && !(*pte & INTEL_PTE_WIRED)) {
+ /*
+ * wiring down mapping
+ */
+ map->stats.wired_count++;
+ i = ptes_per_vm_page;
+ do {
+ *pte++ |= INTEL_PTE_WIRED;
+ } while (--i > 0);
+ }
+ else if (!wired && (*pte & INTEL_PTE_WIRED)) {
+ /*
+ * unwiring mapping
+ */
+ map->stats.wired_count--;
+ i = ptes_per_vm_page;
+ do {
+#ifdef MACH_PV_PAGETABLES
+ if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~INTEL_PTE_WIRED)))
+ panic("%s:%d could not wire down pte %p\n",__FILE__,__LINE__,pte);
+#else /* MACH_PV_PAGETABLES */
+ *pte &= ~INTEL_PTE_WIRED;
+#endif /* MACH_PV_PAGETABLES */
+ pte++;
+ } while (--i > 0);
+ }
+
+ PMAP_READ_UNLOCK(map, spl);
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+
+phys_addr_t pmap_extract(
+ pmap_t pmap,
+ vm_offset_t va)
+{
+ pt_entry_t *pte;
+ phys_addr_t pa;
+ int spl;
+
+ SPLVM(spl);
+ simple_lock(&pmap->lock);
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ pa = 0;
+ else if (!(*pte & INTEL_PTE_VALID))
+ pa = 0;
+ else
+ pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK);
+ simple_unlock(&pmap->lock);
+ SPLX(spl);
+ return(pa);
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+#if 0
+void pmap_copy(
+ pmap_t dst_pmap,
+ pmap_t src_pmap,
+ vm_offset_t dst_addr,
+ vm_size_t len,
+ vm_offset_t src_addr)
+{
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_collect
+ * Function:
+ * Garbage collects the physical map system for
+ * pages which are no longer used.
+ * Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but
+ * others may be collected.
+ * Usage:
+ * Called by the pageout daemon when pages are scarce.
+ */
+void pmap_collect(pmap_t p)
+{
+ pt_entry_t *ptp;
+ pt_entry_t *eptp;
+ phys_addr_t pa;
+ int spl, wired;
+
+ if (p == PMAP_NULL)
+ return;
+
+ if (p == kernel_pmap)
+ return;
+
+ /*
+ * Free the page table tree.
+ */
+ PMAP_READ_LOCK(p, spl);
+#if PAE
+#ifdef __x86_64__
+ for (int l4i = 0; l4i < lin2l4num(VM_MAX_USER_ADDRESS); l4i++) {
+ pt_entry_t pdp = (pt_entry_t) p->l4base[l4i];
+ if (!(pdp & INTEL_PTE_VALID))
+ continue;
+ pt_entry_t *pdpbase = (pt_entry_t*) ptetokv(pdp);
+ for (int l3i = 0; l3i < NPTES; l3i++)
+#else /* __x86_64__ */
+ pt_entry_t *pdpbase = p->pdpbase;
+ for (int l3i = 0; l3i < lin2pdpnum(VM_MAX_USER_ADDRESS); l3i++)
+#endif /* __x86_64__ */
+ {
+ pt_entry_t pde = (pt_entry_t ) pdpbase[l3i];
+ if (!(pde & INTEL_PTE_VALID))
+ continue;
+ pt_entry_t *pdebase = (pt_entry_t*) ptetokv(pde);
+ for (int l2i = 0; l2i < NPTES; l2i++)
+#else /* PAE */
+ pt_entry_t *pdebase = p->dirbase;
+ for (int l2i = 0; l2i < lin2pdenum(VM_MAX_USER_ADDRESS); l2i++)
+#endif /* PAE */
+ {
+ pt_entry_t pte = (pt_entry_t) pdebase[l2i];
+ if (!(pte & INTEL_PTE_VALID))
+ continue;
+
+ pa = pte_to_pa(pte);
+ ptp = (pt_entry_t *)phystokv(pa);
+ eptp = ptp + NPTES*ptes_per_vm_page;
+
+ /*
+ * If the pte page has any wired mappings, we cannot
+ * free it.
+ */
+ wired = 0;
+ {
+ pt_entry_t *ptep;
+ for (ptep = ptp; ptep < eptp; ptep++) {
+ if (*ptep & INTEL_PTE_WIRED) {
+ wired = 1;
+ break;
+ }
+ }
+ }
+ if (!wired) {
+ /*
+ * Remove the virtual addresses mapped by this pte page.
+ */
+ { /*XXX big hack*/
+ vm_offset_t va = pagenum2lin(l4i, l3i, l2i, 0);
+ if (p == kernel_pmap)
+ va = lintokv(va);
+ pmap_remove_range(p, va, ptp, eptp);
+ }
+
+ /*
+ * Invalidate the page directory pointer.
+ */
+ {
+ int i = ptes_per_vm_page;
+ pt_entry_t *pdep = &pdebase[l2i];
+ do {
+#ifdef MACH_PV_PAGETABLES
+ unsigned long pte = *pdep;
+ void *ptable = (void*) ptetokv(pte);
+ if (!(hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdep++)), 0)))
+ panic("%s:%d could not clear pde %p\n",__FILE__,__LINE__,pdep-1);
+ if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, kv_to_mfn(ptable)))
+ panic("couldn't unpin page %p(%lx)\n", ptable, (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)ptable)));
+ pmap_set_page_readwrite(ptable);
+#else /* MACH_PV_PAGETABLES */
+ *pdep++ = 0;
+#endif /* MACH_PV_PAGETABLES */
+ } while (--i > 0);
+ }
+
+ PMAP_READ_UNLOCK(p, spl);
+
+ /*
+ * And free the pte page itself.
+ */
+ kmem_cache_free(&pt_cache, (vm_offset_t)ptetokv(pte));
+
+ PMAP_READ_LOCK(p, spl);
+
+ }
+ }
+#if PAE
+ // TODO check l2
+ }
+#ifdef __x86_64__
+ // TODO check l3
+ }
+#endif /* __x86_64__ */
+#endif /* PAE */
+
+ PMAP_UPDATE_TLBS(p, VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
+
+ PMAP_READ_UNLOCK(p, spl);
+ return;
+
+}
+
+#if MACH_KDB
+/*
+ * Routine: pmap_whatis
+ * Function:
+ * Check whether this address is within a pmap
+ * Usage:
+ * Called from debugger
+ */
+int pmap_whatis(pmap_t p, vm_offset_t a)
+{
+ pt_entry_t *ptp;
+ phys_addr_t pa;
+ int spl;
+ int ret = 0;
+
+ if (p == PMAP_NULL)
+ return 0;
+
+ PMAP_READ_LOCK(p, spl);
+#if PAE
+#ifdef __x86_64__
+ if (a >= (vm_offset_t) p->l4base && a < (vm_offset_t) (&p->l4base[NPTES])) {
+ db_printf("L4 for pmap %p\n", p);
+ ret = 1;
+ }
+ for (int l4i = 0; l4i < NPTES; l4i++) {
+ pt_entry_t pdp = (pt_entry_t) p->l4base[l4i];
+ if (!(pdp & INTEL_PTE_VALID))
+ continue;
+ pt_entry_t *pdpbase = (pt_entry_t*) ptetokv(pdp);
+#else /* __x86_64__ */
+ int l4i = 0;
+ pt_entry_t *pdpbase = p->pdpbase;
+#endif /* __x86_64__ */
+ if (a >= (vm_offset_t) pdpbase && a < (vm_offset_t) (&pdpbase[NPTES])) {
+ db_printf("PDP %d for pmap %p\n", l4i, p);
+ ret = 1;
+ }
+ for (int l3i = 0; l3i < NPTES; l3i++)
+ {
+ pt_entry_t pde = (pt_entry_t ) pdpbase[l3i];
+ if (!(pde & INTEL_PTE_VALID))
+ continue;
+ pt_entry_t *pdebase = (pt_entry_t*) ptetokv(pde);
+#else /* PAE */
+ int l4i = 0, l3i = 0;
+ pt_entry_t *pdebase = p->dirbase;
+#endif /* PAE */
+ if (a >= (vm_offset_t) pdebase && a < (vm_offset_t) (&pdebase[NPTES])) {
+ db_printf("PDE %d %d for pmap %p\n", l4i, l3i, p);
+ ret = 1;
+ }
+ for (int l2i = 0; l2i < NPTES; l2i++)
+ {
+ pt_entry_t pte = (pt_entry_t) pdebase[l2i];
+ if (!(pte & INTEL_PTE_VALID))
+ continue;
+
+ pa = pte_to_pa(pte);
+ ptp = (pt_entry_t *)phystokv(pa);
+
+ if (a >= (vm_offset_t) ptp && a < (vm_offset_t) (&ptp[NPTES*ptes_per_vm_page])) {
+ db_printf("PTP %d %d %d for pmap %p\n", l4i, l3i, l2i, p);
+ ret = 1;
+ }
+ }
+#if PAE
+ }
+#ifdef __x86_64__
+ }
+#endif /* __x86_64__ */
+#endif /* PAE */
+ PMAP_READ_UNLOCK(p, spl);
+
+ if (p == kernel_pmap) {
+ phys_addr_t pa;
+ if (DB_VALID_KERN_ADDR(a))
+ pa = kvtophys(a);
+ else
+ pa = pmap_extract(current_task()->map->pmap, a);
+
+ if (valid_page(pa)) {
+ unsigned long pai;
+ pv_entry_t pv_h;
+
+ pai = pa_index(pa);
+ for (pv_h = pai_to_pvh(pai);
+ pv_h && pv_h->pmap;
+ pv_h = pv_h->next)
+ db_printf("pmap %p at %llx\n", pv_h->pmap, pv_h->va);
+ }
+ }
+
+ return ret;
+}
+#endif /* MACH_KDB */
+
+/*
+ * Routine: pmap_activate
+ * Function:
+ * Binds the given physical map to the given
+ * processor, and returns a hardware map description.
+ */
+#if 0
+void pmap_activate(pmap_t my_pmap, thread_t th, int my_cpu)
+{
+ PMAP_ACTIVATE(my_pmap, th, my_cpu);
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_deactivate
+ * Function:
+ * Indicates that the given physical map is no longer
+ * in use on the specified processor. (This is a macro
+ * in pmap.h)
+ */
+#if 0
+void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu)
+{
+ PMAP_DEACTIVATE(pmap, th, which_cpu);
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_kernel
+ * Function:
+ * Returns the physical map handle for the kernel.
+ */
+#if 0
+pmap_t pmap_kernel()
+{
+ return (kernel_pmap);
+}
+#endif /* 0 */
+
+/*
+ * pmap_zero_page zeros the specified (machine independent) page.
+ * See machine/phys.c or machine/phys.s for implementation.
+ */
+#if 0
+pmap_zero_page(vm_offset_t phys)
+{
+ int i;
+
+ assert(phys != vm_page_fictitious_addr);
+ i = PAGE_SIZE / INTEL_PGBYTES;
+ phys = intel_pfn(phys);
+
+ while (i--)
+ zero_phys(phys++);
+}
+#endif /* 0 */
+
+/*
+ * pmap_copy_page copies the specified (machine independent) page.
+ * See machine/phys.c or machine/phys.s for implementation.
+ */
+#if 0
+pmap_copy_page(vm_offset_t src, vm_offset_t dst)
+{
+ int i;
+
+ assert(src != vm_page_fictitious_addr);
+ assert(dst != vm_page_fictitious_addr);
+ i = PAGE_SIZE / INTEL_PGBYTES;
+
+ while (i--) {
+ copy_phys(intel_pfn(src), intel_pfn(dst));
+ src += INTEL_PGBYTES;
+ dst += INTEL_PGBYTES;
+ }
+}
+#endif /* 0 */
+
+/*
+ * Routine: pmap_pageable
+ * Function:
+ * Make the specified pages (by pmap, offset)
+ * pageable (or not) as requested.
+ *
+ * A page which is not pageable may not take
+ * a fault; therefore, its page table entry
+ * must remain valid for the duration.
+ *
+ * This routine is merely advisory; pmap_enter
+ * will specify that these pages are to be wired
+ * down (or not) as appropriate.
+ */
+void
+pmap_pageable(
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end,
+ boolean_t pageable)
+{
+}
+
+/*
+ * Clear specified attribute bits.
+ */
+static void
+phys_attribute_clear(
+ phys_addr_t phys,
+ int bits)
+{
+ pv_entry_t pv_h;
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
+ unsigned long pai;
+ pmap_t pmap;
+ int spl;
+
+ assert(phys != vm_page_fictitious_addr);
+ if (!valid_page(phys)) {
+ /*
+ * Not a managed page.
+ */
+ return;
+ }
+
+ /*
+ * Lock the pmap system first, since we will be changing
+ * several pmaps.
+ */
+
+ PMAP_WRITE_LOCK(spl);
+
+ pai = pa_index(phys);
+ pv_h = pai_to_pvh(pai);
+
+ /*
+ * Walk down PV list, clearing all modify or reference bits.
+ * We do not have to lock the pv_list because we have
+ * the entire pmap system locked.
+ */
+ if (pv_h->pmap != PMAP_NULL) {
+ /*
+ * There are some mappings.
+ */
+ for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+ vm_offset_t va;
+
+ pmap = pv_e->pmap;
+ /*
+ * Lock the pmap to block pmap_extract and similar routines.
+ */
+ simple_lock(&pmap->lock);
+
+ va = pv_e->va;
+ pte = pmap_pte(pmap, va);
+
+ /*
+ * Consistency checks.
+ */
+ assert(*pte & INTEL_PTE_VALID);
+ assert(pte_to_pa(*pte) == phys);
+
+ /*
+ * Clear modify or reference bits.
+ */
+ {
+ int i = ptes_per_vm_page;
+ do {
+#ifdef MACH_PV_PAGETABLES
+ if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~bits)))
+ panic("%s:%d could not clear bits %x from pte %p\n",__FILE__,__LINE__,bits,pte);
+#else /* MACH_PV_PAGETABLES */
+ *pte &= ~bits;
+#endif /* MACH_PV_PAGETABLES */
+ } while (--i > 0);
+ }
+ PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+ simple_unlock(&pmap->lock);
+ }
+ }
+
+ pmap_phys_attributes[pai] &= ~bits;
+
+ PMAP_WRITE_UNLOCK(spl);
+}
+
+/*
+ * Check specified attribute bits.
+ */
+static boolean_t
+phys_attribute_test(
+ phys_addr_t phys,
+ int bits)
+{
+ pv_entry_t pv_h;
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
+ unsigned long pai;
+ pmap_t pmap;
+ int spl;
+
+ assert(phys != vm_page_fictitious_addr);
+ if (!valid_page(phys)) {
+ /*
+ * Not a managed page.
+ */
+ return (FALSE);
+ }
+
+ /*
+ * Lock the pmap system first, since we will be checking
+ * several pmaps.
+ */
+
+ PMAP_WRITE_LOCK(spl);
+
+ pai = pa_index(phys);
+ pv_h = pai_to_pvh(pai);
+
+ if (pmap_phys_attributes[pai] & bits) {
+ PMAP_WRITE_UNLOCK(spl);
+ return (TRUE);
+ }
+
+ /*
+ * Walk down PV list, checking all mappings.
+ * We do not have to lock the pv_list because we have
+ * the entire pmap system locked.
+ */
+ if (pv_h->pmap != PMAP_NULL) {
+ /*
+ * There are some mappings.
+ */
+ for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+
+ pmap = pv_e->pmap;
+ /*
+ * Lock the pmap to block pmap_extract and similar routines.
+ */
+ simple_lock(&pmap->lock);
+
+ {
+ vm_offset_t va;
+
+ va = pv_e->va;
+ pte = pmap_pte(pmap, va);
+
+ /*
+ * Consistency checks.
+ */
+ assert(*pte & INTEL_PTE_VALID);
+ assert(pte_to_pa(*pte) == phys);
+ }
+
+ /*
+ * Check modify or reference bits.
+ */
+ {
+ int i = ptes_per_vm_page;
+
+ do {
+ if (*pte & bits) {
+ simple_unlock(&pmap->lock);
+ PMAP_WRITE_UNLOCK(spl);
+ return (TRUE);
+ }
+ } while (--i > 0);
+ }
+ simple_unlock(&pmap->lock);
+ }
+ }
+ PMAP_WRITE_UNLOCK(spl);
+ return (FALSE);
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+
+void pmap_clear_modify(phys_addr_t phys)
+{
+ phys_attribute_clear(phys, PHYS_MODIFIED);
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
+ */
+
+boolean_t pmap_is_modified(phys_addr_t phys)
+{
+ return (phys_attribute_test(phys, PHYS_MODIFIED));
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+
+void pmap_clear_reference(phys_addr_t phys)
+{
+ phys_attribute_clear(phys, PHYS_REFERENCED);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page is referenced
+ * by any physical maps.
+ */
+
+boolean_t pmap_is_referenced(phys_addr_t phys)
+{
+ return (phys_attribute_test(phys, PHYS_REFERENCED));
+}
+
+#if NCPUS > 1
+/*
+* TLB Coherence Code (TLB "shootdown" code)
+*
+* Threads that belong to the same task share the same address space and
+* hence share a pmap. However, they may run on distinct cpus and thus
+* have distinct TLBs that cache page table entries. In order to guarantee
+* the TLBs are consistent, whenever a pmap is changed, all threads that
+* are active in that pmap must have their TLB updated. To keep track of
+* this information, the set of cpus that are currently using a pmap is
+* maintained within each pmap structure (cpus_using). Pmap_activate() and
+* pmap_deactivate add and remove, respectively, a cpu from this set.
+* Since the TLBs are not addressable over the bus, each processor must
+* flush its own TLB; a processor that needs to invalidate another TLB
+* needs to interrupt the processor that owns that TLB to signal the
+* update.
+*
+* Whenever a pmap is updated, the lock on that pmap is locked, and all
+* cpus using the pmap are signaled to invalidate. All threads that need
+* to activate a pmap must wait for the lock to clear to await any updates
+* in progress before using the pmap. They must ACQUIRE the lock to add
+* their cpu to the cpus_using set. An implicit assumption made
+* throughout the TLB code is that all kernel code that runs at or higher
+* than splvm blocks out update interrupts, and that such code does not
+* touch pageable pages.
+*
+* A shootdown interrupt serves another function besides signaling a
+* processor to invalidate. The interrupt routine (pmap_update_interrupt)
+* waits for the both the pmap lock (and the kernel pmap lock) to clear,
+* preventing user code from making implicit pmap updates while the
+* sending processor is performing its update. (This could happen via a
+* user data write reference that turns on the modify bit in the page
+* table). It must wait for any kernel updates that may have started
+* concurrently with a user pmap update because the IPC code
+* changes mappings.
+* Spinning on the VALUES of the locks is sufficient (rather than
+* having to acquire the locks) because any updates that occur subsequent
+* to finding the lock unlocked will be signaled via another interrupt.
+* (This assumes the interrupt is cleared before the low level interrupt code
+* calls pmap_update_interrupt()).
+*
+* The signaling processor must wait for any implicit updates in progress
+* to terminate before continuing with its update. Thus it must wait for an
+* acknowledgement of the interrupt from each processor for which such
+* references could be made. For maintaining this information, a set
+* cpus_active is used. A cpu is in this set if and only if it can
+* use a pmap. When pmap_update_interrupt() is entered, a cpu is removed from
+* this set; when all such cpus are removed, it is safe to update.
+*
+* Before attempting to acquire the update lock on a pmap, a cpu (A) must
+* be at least at the priority of the interprocessor interrupt
+* (splip<=splvm). Otherwise, A could grab a lock and be interrupted by a
+* kernel update; it would spin forever in pmap_update_interrupt() trying
+* to acquire the user pmap lock it had already acquired. Furthermore A
+* must remove itself from cpus_active. Otherwise, another cpu holding
+* the lock (B) could be in the process of sending an update signal to A,
+* and thus be waiting for A to remove itself from cpus_active. If A is
+* spinning on the lock at priority this will never happen and a deadlock
+* will result.
+*/
+
+/*
+ * Signal another CPU that it must flush its TLB
+ */
+void signal_cpus(
+ cpu_set use_list,
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end)
+{
+ int which_cpu, j;
+ pmap_update_list_t update_list_p;
+
+ while ((which_cpu = __builtin_ffs(use_list)) != 0) {
+ which_cpu -= 1; /* convert to 0 origin */
+
+ update_list_p = &cpu_update_list[which_cpu];
+ simple_lock(&update_list_p->lock);
+
+ j = update_list_p->count;
+ if (j >= UPDATE_LIST_SIZE) {
+ /*
+ * list overflowed. Change last item to
+ * indicate overflow.
+ */
+ update_list_p->item[UPDATE_LIST_SIZE-1].pmap = kernel_pmap;
+ update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_USER_ADDRESS;
+ update_list_p->item[UPDATE_LIST_SIZE-1].end = VM_MAX_KERNEL_ADDRESS;
+ }
+ else {
+ update_list_p->item[j].pmap = pmap;
+ update_list_p->item[j].start = start;
+ update_list_p->item[j].end = end;
+ update_list_p->count = j+1;
+ }
+ cpu_update_needed[which_cpu] = TRUE;
+ simple_unlock(&update_list_p->lock);
+
+ __sync_synchronize();
+ if (((cpus_idle & (1 << which_cpu)) == 0))
+ interrupt_processor(which_cpu);
+ use_list &= ~(1 << which_cpu);
+ }
+}
+
+void process_pmap_updates(pmap_t my_pmap)
+{
+ int my_cpu = cpu_number();
+ pmap_update_list_t update_list_p;
+ int j;
+ pmap_t pmap;
+
+ update_list_p = &cpu_update_list[my_cpu];
+ simple_lock(&update_list_p->lock);
+
+ for (j = 0; j < update_list_p->count; j++) {
+ pmap = update_list_p->item[j].pmap;
+ if (pmap == my_pmap ||
+ pmap == kernel_pmap) {
+
+ INVALIDATE_TLB(pmap,
+ update_list_p->item[j].start,
+ update_list_p->item[j].end);
+ }
+ }
+ update_list_p->count = 0;
+ cpu_update_needed[my_cpu] = FALSE;
+ simple_unlock(&update_list_p->lock);
+}
+
+/*
+ * Interrupt routine for TBIA requested from other processor.
+ */
+void pmap_update_interrupt(void)
+{
+ int my_cpu;
+ pmap_t my_pmap;
+ int s;
+
+ my_cpu = cpu_number();
+
+ /*
+ * Exit now if we're idle. We'll pick up the update request
+ * when we go active, and we must not put ourselves back in
+ * the active set because we'll never process the interrupt
+ * while we're idle (thus hanging the system).
+ */
+ if (cpus_idle & (1 << my_cpu))
+ return;
+
+ if (current_thread() == THREAD_NULL)
+ my_pmap = kernel_pmap;
+ else {
+ my_pmap = current_pmap();
+ if (!pmap_in_use(my_pmap, my_cpu))
+ my_pmap = kernel_pmap;
+ }
+
+ /*
+ * Raise spl to splvm (above splip) to block out pmap_extract
+ * from IO code (which would put this cpu back in the active
+ * set).
+ */
+ s = splvm();
+
+ do {
+
+ /*
+ * Indicate that we're not using either user or kernel
+ * pmap.
+ */
+ i_bit_clear(my_cpu, &cpus_active);
+
+ /*
+ * Wait for any pmap updates in progress, on either user
+ * or kernel pmap.
+ */
+ while (my_pmap->lock.lock_data ||
+ kernel_pmap->lock.lock_data)
+ cpu_pause();
+
+ process_pmap_updates(my_pmap);
+
+ i_bit_set(my_cpu, &cpus_active);
+
+ } while (cpu_update_needed[my_cpu]);
+
+ splx(s);
+}
+#else /* NCPUS > 1 */
+/*
+ * Dummy routine to satisfy external reference.
+ */
+void pmap_update_interrupt(void)
+{
+ /* should never be called. */
+}
+#endif /* NCPUS > 1 */
+
+#if defined(__i386__) || defined (__x86_64__)
+/* Unmap page 0 to trap NULL references. */
+void
+pmap_unmap_page_zero (void)
+{
+ int *pte;
+
+ printf("Unmapping the zero page. Some BIOS functions may not be working any more.\n");
+ pte = (int *) pmap_pte (kernel_pmap, 0);
+ if (!pte)
+ return;
+ assert (pte);
+#ifdef MACH_PV_PAGETABLES
+ if (!hyp_mmu_update_pte(kv_to_ma(pte), 0))
+ printf("couldn't unmap page 0\n");
+#else /* MACH_PV_PAGETABLES */
+ *pte = 0;
+ INVALIDATE_TLB(kernel_pmap, 0, PAGE_SIZE);
+#endif /* MACH_PV_PAGETABLES */
+}
+#endif /* __i386__ */
+
+void
+pmap_make_temporary_mapping(void)
+{
+ int i;
+ /*
+ * We'll have to temporarily install a direct mapping
+ * between physical memory and low linear memory,
+ * until we start using our new kernel segment descriptors.
+ */
+#if INIT_VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
+ vm_offset_t delta = INIT_VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS;
+ if ((vm_offset_t)(-delta) < delta)
+ delta = (vm_offset_t)(-delta);
+ int nb_direct = delta >> PDESHIFT;
+ for (i = 0; i < nb_direct; i++)
+ kernel_page_dir[lin2pdenum_cont(INIT_VM_MIN_KERNEL_ADDRESS) + i] =
+ kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS) + i];
+#endif
+
+#ifdef LINUX_DEV
+ /* We need BIOS memory mapped at 0xc0000 & co for BIOS accesses */
+#if VM_MIN_KERNEL_ADDRESS != 0
+ kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] =
+ kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS)];
+#endif
+#endif /* LINUX_DEV */
+
+#ifdef MACH_PV_PAGETABLES
+#ifndef __x86_64__
+ const int PDPNUM_KERNEL = PDPNUM;
+#endif
+ for (i = 0; i < PDPNUM_KERNEL; i++)
+ pmap_set_page_readonly_init((void*) kernel_page_dir + i * INTEL_PGBYTES);
+#if PAE
+#ifndef __x86_64__
+ pmap_set_page_readonly_init(kernel_pmap->pdpbase);
+#endif
+#endif /* PAE */
+#endif /* MACH_PV_PAGETABLES */
+
+ pmap_set_page_dir();
+}
+
+void
+pmap_set_page_dir(void)
+{
+#if PAE
+#ifdef __x86_64__
+ set_cr3((unsigned long)_kvtophys(kernel_pmap->l4base));
+#else
+ set_cr3((unsigned long)_kvtophys(kernel_pmap->pdpbase));
+#endif
+#ifndef MACH_HYP
+ if (!CPU_HAS_FEATURE(CPU_FEATURE_PAE))
+ panic("CPU doesn't have support for PAE.");
+ set_cr4(get_cr4() | CR4_PAE);
+#endif /* MACH_HYP */
+#else
+ set_cr3((unsigned long)_kvtophys(kernel_page_dir));
+#endif /* PAE */
+}
+
+void
+pmap_remove_temporary_mapping(void)
+{
+#if INIT_VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
+ int i;
+ vm_offset_t delta = INIT_VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS;
+ if ((vm_offset_t)(-delta) < delta)
+ delta = (vm_offset_t)(-delta);
+ int nb_direct = delta >> PDESHIFT;
+ /* Get rid of the temporary direct mapping and flush it out of the TLB. */
+ for (i = 0 ; i < nb_direct; i++) {
+#ifdef MACH_XEN
+#ifdef MACH_PSEUDO_PHYS
+ if (!hyp_mmu_update_pte(kv_to_ma(&kernel_page_dir[lin2pdenum_cont(VM_MIN_KERNEL_ADDRESS) + i]), 0))
+#else /* MACH_PSEUDO_PHYS */
+ if (hyp_do_update_va_mapping(VM_MIN_KERNEL_ADDRESS + i * INTEL_PGBYTES, 0, UVMF_INVLPG | UVMF_ALL))
+#endif /* MACH_PSEUDO_PHYS */
+ printf("couldn't unmap frame %d\n", i);
+#else /* MACH_XEN */
+ kernel_page_dir[lin2pdenum_cont(INIT_VM_MIN_KERNEL_ADDRESS) + i] = 0;
+#endif /* MACH_XEN */
+ }
+#endif
+
+#ifdef LINUX_DEV
+ /* Keep BIOS memory mapped */
+#if VM_MIN_KERNEL_ADDRESS != 0
+ kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] =
+ kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS)];
+#endif
+#endif /* LINUX_DEV */
+
+ /* Not used after boot, better give it back. */
+#ifdef MACH_XEN
+ hyp_free_page(0, (void*) VM_MIN_KERNEL_ADDRESS);
+#endif /* MACH_XEN */
+
+ flush_tlb();
+}
diff --git a/riscv/intel/pmap.h b/riscv/intel/pmap.h
new file mode 100644
index 0000000..d745aad
--- /dev/null
+++ b/riscv/intel/pmap.h
@@ -0,0 +1,574 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: pmap.h
+ *
+ * Authors: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Machine-dependent structures for the physical map module.
+ */
+
+#ifndef _PMAP_MACHINE_
+#define _PMAP_MACHINE_ 1
+
+#ifndef __ASSEMBLER__
+
+#include <kern/lock.h>
+#include <mach/machine/vm_param.h>
+#include <mach/vm_statistics.h>
+#include <mach/kern_return.h>
+#include <mach/vm_prot.h>
+#include <riscv/proc_reg.h>
+
+/*
+ * Define the generic in terms of the specific
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+#define INTEL_PGBYTES I386_PGBYTES
+#define INTEL_PGSHIFT I386_PGSHIFT
+#define intel_btop(x) i386_btop(x)
+#define intel_ptob(x) i386_ptob(x)
+#define intel_round_page(x) i386_round_page(x)
+#define intel_trunc_page(x) i386_trunc_page(x)
+#define trunc_intel_to_vm(x) trunc_i386_to_vm(x)
+#define round_intel_to_vm(x) round_i386_to_vm(x)
+#define vm_to_intel(x) vm_to_i386(x)
+#endif /* __i386__ */
+
+/*
+ * i386/i486 Page Table Entry
+ */
+
+typedef phys_addr_t pt_entry_t;
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+
+#endif /* __ASSEMBLER__ */
+
+#define INTEL_OFFMASK 0xfff /* offset within page */
+#if PAE
+#ifdef __x86_64__
+#define L4SHIFT 39 /* L4 shift */
+#define L4MASK 0x1ff /* mask for L4 index */
+#define PDPNUM_KERNEL (((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) >> PDPSHIFT) + 1)
+#define PDPMASK 0x1ff /* mask for page directory pointer index */
+#else /* __x86_64__ */
+#define PDPNUM 4 /* number of page directory pointers */
+#define PDPMASK 3 /* mask for page directory pointer index */
+#endif /* __x86_64__ */
+#define PDPSHIFT 30 /* page directory pointer */
+#define PDESHIFT 21 /* page descriptor shift */
+#define PDEMASK 0x1ff /* mask for page descriptor index */
+#define PTESHIFT 12 /* page table shift */
+#define PTEMASK 0x1ff /* mask for page table index */
+#else /* PAE */
+#define PDPNUM 1 /* number of page directory pointers */
+#define PDESHIFT 22 /* page descriptor shift */
+#define PDEMASK 0x3ff /* mask for page descriptor index */
+#define PTESHIFT 12 /* page table shift */
+#define PTEMASK 0x3ff /* mask for page table index */
+#endif /* PAE */
+
+/*
+ * Convert linear offset to L4 pointer index
+ */
+#ifdef __x86_64__
+#define lin2l4num(a) (((a) >> L4SHIFT) & L4MASK)
+#endif
+
+/*
+ * Convert linear offset to page descriptor index
+ */
+#define lin2pdenum(a) (((a) >> PDESHIFT) & PDEMASK)
+
+#if PAE
+/* Special version assuming contiguous page directories. Making it
+ include the page directory pointer table index too. */
+#ifdef __x86_64__
+#define lin2pdenum_cont(a) (((a) >> PDESHIFT) & 0x3ff)
+#else
+#define lin2pdenum_cont(a) (((a) >> PDESHIFT) & 0x7ff)
+#endif
+#else
+#define lin2pdenum_cont(a) lin2pdenum(a)
+#endif
+
+/*
+ * Convert linear offset to page directory pointer index
+ */
+#if PAE
+#define lin2pdpnum(a) (((a) >> PDPSHIFT) & PDPMASK)
+#endif
+
+/*
+ * Convert page descriptor index to linear address
+ */
+#define pdenum2lin(a) ((vm_offset_t)(a) << PDESHIFT)
+
+#if PAE
+#ifdef __x86_64__
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l4num) << L4SHIFT) + \
+ ((vm_offset_t)(l3num) << PDPSHIFT) + \
+ ((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#else /* __x86_64__ */
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l3num) << PDPSHIFT) + \
+ ((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#endif
+#else /* PAE */
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#endif
+
+
+/*
+ * Convert linear offset to page table index
+ */
+#define ptenum(a) (((a) >> PTESHIFT) & PTEMASK)
+
+#define NPTES (intel_ptob(1)/sizeof(pt_entry_t))
+#define NPDES (PDPNUM * (intel_ptob(1)/sizeof(pt_entry_t)))
+
+/*
+ * Hardware pte bit definitions (to be used directly on the ptes
+ * without using the bit fields).
+ */
+
+#define INTEL_PTE_VALID 0x00000001
+#define INTEL_PTE_WRITE 0x00000002
+#define INTEL_PTE_USER 0x00000004
+#define INTEL_PTE_WTHRU 0x00000008
+#define INTEL_PTE_NCACHE 0x00000010
+#define INTEL_PTE_REF 0x00000020
+#define INTEL_PTE_MOD 0x00000040
+#define INTEL_PTE_PS 0x00000080
+#ifdef MACH_PV_PAGETABLES
+/* Not supported */
+#define INTEL_PTE_GLOBAL 0x00000000
+#else /* MACH_PV_PAGETABLES */
+#define INTEL_PTE_GLOBAL 0x00000100
+#endif /* MACH_PV_PAGETABLES */
+#define INTEL_PTE_WIRED 0x00000200
+#ifdef PAE
+#ifdef __x86_64__
+#define INTEL_PTE_PFN 0xfffffffffffff000ULL
+#else /* __x86_64__ */
+#define INTEL_PTE_PFN 0x00007ffffffff000ULL
+#endif/* __x86_64__ */
+#else
+#define INTEL_PTE_PFN 0xfffff000
+#endif
+
+#define pa_to_pte(a) ((a) & INTEL_PTE_PFN)
+#ifdef MACH_PSEUDO_PHYS
+#define pte_to_pa(p) ma_to_pa((p) & INTEL_PTE_PFN)
+#else /* MACH_PSEUDO_PHYS */
+#define pte_to_pa(p) ((p) & INTEL_PTE_PFN)
+#endif /* MACH_PSEUDO_PHYS */
+#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
+
+/*
+ * Convert page table entry to kernel virtual address
+ */
+#define ptetokv(a) (phystokv(pte_to_pa(a)))
+
+#ifndef __ASSEMBLER__
+typedef volatile long cpu_set; /* set of CPUs - must be <= 32 */
+ /* changed by other processors */
+
+struct pmap {
+#if ! PAE
+ pt_entry_t *dirbase; /* page directory table */
+#else /* PAE */
+#ifdef __x86_64__
+ pt_entry_t *l4base; /* l4 table */
+#ifdef MACH_HYP
+ pt_entry_t *user_l4base; /* Userland l4 table */
+ pt_entry_t *user_pdpbase; /* Userland l4 table */
+#endif /* MACH_HYP */
+#else /* x86_64 */
+ pt_entry_t *pdpbase; /* page directory pointer table */
+#endif /* x86_64 */
+#endif /* PAE */
+ int ref_count; /* reference count */
+ decl_simple_lock_data(,lock)
+ /* lock on map */
+ struct pmap_statistics stats; /* map statistics */
+ cpu_set cpus_using; /* bitmap of cpus using pmap */
+};
+
+typedef struct pmap *pmap_t;
+
+#define PMAP_NULL ((pmap_t) 0)
+
+#ifdef MACH_PV_PAGETABLES
+extern void pmap_set_page_readwrite(void *addr);
+extern void pmap_set_page_readonly(void *addr);
+extern void pmap_set_page_readonly_init(void *addr);
+extern void pmap_map_mfn(void *addr, unsigned long mfn);
+extern void pmap_clear_bootstrap_pagetable(pt_entry_t *addr);
+#endif /* MACH_PV_PAGETABLES */
+
+#if PAE
+#ifdef __x86_64__
+/* TODO: support PCID */
+#ifdef MACH_HYP
+#define set_pmap(pmap) \
+ MACRO_BEGIN \
+ set_cr3(kvtophys((vm_offset_t)(pmap)->l4base)); \
+ if (pmap->user_l4base) \
+ if (!hyp_set_user_cr3(kvtophys((vm_offset_t)(pmap)->user_l4base))) \
+ panic("set_user_cr3"); \
+ MACRO_END
+#else /* MACH_HYP */
+#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->l4base))
+#endif /* MACH_HYP */
+#else /* x86_64 */
+#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->pdpbase))
+#endif /* x86_64 */
+#else /* PAE */
+#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->dirbase))
+#endif /* PAE */
+
+typedef struct {
+ pt_entry_t *entry;
+ vm_offset_t vaddr;
+} pmap_mapwindow_t;
+
+extern pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry);
+extern void pmap_put_mapwindow(pmap_mapwindow_t *map);
+
+#define PMAP_NMAPWINDOWS 2 /* Per CPU */
+
+#if NCPUS > 1
+/*
+ * List of cpus that are actively using mapped memory. Any
+ * pmap update operation must wait for all cpus in this list.
+ * Update operations must still be queued to cpus not in this
+ * list.
+ */
+extern cpu_set cpus_active;
+
+/*
+ * List of cpus that are idle, but still operating, and will want
+ * to see any kernel pmap updates when they become active.
+ */
+extern cpu_set cpus_idle;
+
+/*
+ * Quick test for pmap update requests.
+ */
+extern volatile
+boolean_t cpu_update_needed[NCPUS];
+
+/*
+ * External declarations for PMAP_ACTIVATE.
+ */
+
+void process_pmap_updates(pmap_t);
+extern pmap_t kernel_pmap;
+
+#endif /* NCPUS > 1 */
+
+void pmap_update_interrupt(void);
+
+/*
+ * Machine dependent routines that are used only for i386/i486.
+ */
+
+pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
+
+/*
+ * Macros for speed.
+ */
+
+#if NCPUS > 1
+
+/*
+ * For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
+ * fields to control TLB invalidation on other CPUS.
+ */
+
+#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+ \
+ /* \
+ * Let pmap updates proceed while we wait for this pmap. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_active); \
+ \
+ /* \
+ * Lock the pmap to put this cpu in its active set. \
+ * Wait for updates here. \
+ */ \
+ simple_lock(&kernel_pmap->lock); \
+ \
+ /* \
+ * Process invalidate requests for the kernel pmap. \
+ */ \
+ if (cpu_update_needed[(my_cpu)]) \
+ process_pmap_updates(kernel_pmap); \
+ \
+ /* \
+ * Mark that this cpu is using the pmap. \
+ */ \
+ i_bit_set((my_cpu), &kernel_pmap->cpus_using); \
+ \
+ /* \
+ * Mark this cpu active - IPL will be lowered by \
+ * load_context(). \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ \
+ simple_unlock(&kernel_pmap->lock); \
+}
+
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+ /* \
+ * Mark pmap no longer in use by this cpu even if \
+ * pmap is locked against updates. \
+ */ \
+ i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \
+}
+
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+ pmap_t tpmap = (pmap); \
+ \
+ if (tpmap == kernel_pmap) { \
+ /* \
+ * If this is the kernel pmap, switch to its page tables. \
+ */ \
+ set_pmap(tpmap); \
+ } \
+ else { \
+ /* \
+ * Let pmap updates proceed while we wait for this pmap. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_active); \
+ \
+ /* \
+ * Lock the pmap to put this cpu in its active set. \
+ * Wait for updates here. \
+ */ \
+ simple_lock(&tpmap->lock); \
+ \
+ /* \
+ * No need to invalidate the TLB - the entire user pmap \
+ * will be invalidated by reloading dirbase. \
+ */ \
+ set_pmap(tpmap); \
+ \
+ /* \
+ * Mark that this cpu is using the pmap. \
+ */ \
+ i_bit_set((my_cpu), &tpmap->cpus_using); \
+ \
+ /* \
+ * Mark this cpu active - IPL will be lowered by \
+ * load_context(). \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ \
+ simple_unlock(&tpmap->lock); \
+ } \
+}
+
+#define PMAP_DEACTIVATE_USER(pmap, thread, my_cpu) { \
+ pmap_t tpmap = (pmap); \
+ \
+ /* \
+ * Do nothing if this is the kernel pmap. \
+ */ \
+ if (tpmap != kernel_pmap) { \
+ /* \
+ * Mark pmap no longer in use by this cpu even if \
+ * pmap is locked against updates. \
+ */ \
+ i_bit_clear((my_cpu), &(pmap)->cpus_using); \
+ } \
+}
+
+#define MARK_CPU_IDLE(my_cpu) { \
+ /* \
+ * Mark this cpu idle, and remove it from the active set, \
+ * since it is not actively using any pmap. Signal_cpus \
+ * will notice that it is idle, and avoid signaling it, \
+ * but will queue the update request for when the cpu \
+ * becomes active. \
+ */ \
+ int s = splvm(); \
+ i_bit_set((my_cpu), &cpus_idle); \
+ i_bit_clear((my_cpu), &cpus_active); \
+ splx(s); \
+}
+
+#define MARK_CPU_ACTIVE(my_cpu) { \
+ \
+ int s = splvm(); \
+ /* \
+ * If a kernel_pmap update was requested while this cpu \
+ * was idle, process it as if we got the interrupt. \
+ * Before doing so, remove this cpu from the idle set. \
+ * Since we do not grab any pmap locks while we flush \
+ * our TLB, another cpu may start an update operation \
+ * before we finish. Removing this cpu from the idle \
+ * set assures that we will receive another update \
+ * interrupt if this happens. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_idle); \
+ __sync_synchronize(); \
+ \
+ if (cpu_update_needed[(my_cpu)]) \
+ pmap_update_interrupt(); \
+ \
+ /* \
+ * Mark that this cpu is now active. \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ splx(s); \
+}
+
+#else /* NCPUS > 1 */
+
+/*
+ * With only one CPU, we just have to indicate whether the pmap is
+ * in use.
+ */
+
+#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+ (void) (my_cpu); \
+ kernel_pmap->cpus_using = TRUE; \
+}
+
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+ (void) (my_cpu); \
+ kernel_pmap->cpus_using = FALSE; \
+}
+
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+ pmap_t tpmap = (pmap); \
+ (void) (th); \
+ (void) (my_cpu); \
+ \
+ set_pmap(tpmap); \
+ if (tpmap != kernel_pmap) { \
+ tpmap->cpus_using = TRUE; \
+ } \
+}
+
+#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) { \
+ (void) (thread); \
+ (void) (cpu); \
+ if ((pmap) != kernel_pmap) \
+ (pmap)->cpus_using = FALSE; \
+}
+
+#endif /* NCPUS > 1 */
+
+#define PMAP_CONTEXT(pmap, thread)
+
+#define pmap_kernel() (kernel_pmap)
+#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
+#define pmap_phys_address(frame) ((intel_ptob((phys_addr_t) frame)))
+#define pmap_phys_to_frame(phys) ((int) (intel_btop(phys)))
+#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
+#define pmap_attribute(pmap,addr,size,attr,value) \
+ (KERN_INVALID_ADDRESS)
+
+extern pt_entry_t *kernel_page_dir;
+
+extern vm_offset_t kernel_virtual_start;
+extern vm_offset_t kernel_virtual_end;
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ * Allocate the kernel page directory and page tables,
+ * and direct-map all physical memory.
+ * Called with mapping off.
+ */
+extern void pmap_bootstrap(void);
+
+extern void pmap_set_page_dir(void);
+extern void pmap_make_temporary_mapping(void);
+extern void pmap_remove_temporary_mapping(void);
+
+extern void pmap_unmap_page_zero (void);
+
+/*
+ * pmap_zero_page zeros the specified (machine independent) page.
+ */
+extern void pmap_zero_page (phys_addr_t);
+
+/*
+ * pmap_copy_page copies the specified (machine independent) pages.
+ */
+extern void pmap_copy_page (phys_addr_t, phys_addr_t);
+
+/*
+ * copy_to_phys(src_addr_v, dst_addr_p, count)
+ *
+ * Copy virtual memory to physical memory
+ */
+extern void
+copy_to_phys(
+ vm_offset_t src_addr_v,
+ phys_addr_t dst_addr_p,
+ int count);
+
+/*
+ * copy_from_phys(src_addr_p, dst_addr_v, count)
+ *
+ * Copy physical memory to virtual memory. The virtual memory
+ * is assumed to be present (e.g. the buffer pool).
+ */
+extern void
+copy_from_phys(
+ phys_addr_t src_addr_p,
+ vm_offset_t dst_addr_v,
+ int count);
+
+/*
+ * kvtophys(addr)
+ *
+ * Convert a kernel virtual address to a physical address
+ */
+extern phys_addr_t kvtophys (vm_offset_t);
+
+#if NCPUS > 1
+void signal_cpus(
+ cpu_set use_list,
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end);
+#endif /* NCPUS > 1 */
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _PMAP_MACHINE_ */
diff --git a/riscv/intel/pmap.h~ b/riscv/intel/pmap.h~
new file mode 100644
index 0000000..8b0eba0
--- /dev/null
+++ b/riscv/intel/pmap.h~
@@ -0,0 +1,574 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: pmap.h
+ *
+ * Authors: Avadis Tevanian, Jr., Michael Wayne Young
+ * Date: 1985
+ *
+ * Machine-dependent structures for the physical map module.
+ */
+
+#ifndef _PMAP_MACHINE_
+#define _PMAP_MACHINE_ 1
+
+#ifndef __ASSEMBLER__
+
+#include <kern/lock.h>
+#include <mach/machine/vm_param.h>
+#include <mach/vm_statistics.h>
+#include <mach/kern_return.h>
+#include <mach/vm_prot.h>
+#include <i386/proc_reg.h>
+
+/*
+ * Define the generic in terms of the specific
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+#define INTEL_PGBYTES I386_PGBYTES
+#define INTEL_PGSHIFT I386_PGSHIFT
+#define intel_btop(x) i386_btop(x)
+#define intel_ptob(x) i386_ptob(x)
+#define intel_round_page(x) i386_round_page(x)
+#define intel_trunc_page(x) i386_trunc_page(x)
+#define trunc_intel_to_vm(x) trunc_i386_to_vm(x)
+#define round_intel_to_vm(x) round_i386_to_vm(x)
+#define vm_to_intel(x) vm_to_i386(x)
+#endif /* __i386__ */
+
+/*
+ * i386/i486 Page Table Entry
+ */
+
+typedef phys_addr_t pt_entry_t;
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+
+#endif /* __ASSEMBLER__ */
+
+#define INTEL_OFFMASK 0xfff /* offset within page */
+#if PAE
+#ifdef __x86_64__
+#define L4SHIFT 39 /* L4 shift */
+#define L4MASK 0x1ff /* mask for L4 index */
+#define PDPNUM_KERNEL (((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) >> PDPSHIFT) + 1)
+#define PDPMASK 0x1ff /* mask for page directory pointer index */
+#else /* __x86_64__ */
+#define PDPNUM 4 /* number of page directory pointers */
+#define PDPMASK 3 /* mask for page directory pointer index */
+#endif /* __x86_64__ */
+#define PDPSHIFT 30 /* page directory pointer */
+#define PDESHIFT 21 /* page descriptor shift */
+#define PDEMASK 0x1ff /* mask for page descriptor index */
+#define PTESHIFT 12 /* page table shift */
+#define PTEMASK 0x1ff /* mask for page table index */
+#else /* PAE */
+#define PDPNUM 1 /* number of page directory pointers */
+#define PDESHIFT 22 /* page descriptor shift */
+#define PDEMASK 0x3ff /* mask for page descriptor index */
+#define PTESHIFT 12 /* page table shift */
+#define PTEMASK 0x3ff /* mask for page table index */
+#endif /* PAE */
+
+/*
+ * Convert linear offset to L4 pointer index
+ */
+#ifdef __x86_64__
+#define lin2l4num(a) (((a) >> L4SHIFT) & L4MASK)
+#endif
+
+/*
+ * Convert linear offset to page descriptor index
+ */
+#define lin2pdenum(a) (((a) >> PDESHIFT) & PDEMASK)
+
+#if PAE
+/* Special version assuming contiguous page directories. Making it
+ include the page directory pointer table index too. */
+#ifdef __x86_64__
+#define lin2pdenum_cont(a) (((a) >> PDESHIFT) & 0x3ff)
+#else
+#define lin2pdenum_cont(a) (((a) >> PDESHIFT) & 0x7ff)
+#endif
+#else
+#define lin2pdenum_cont(a) lin2pdenum(a)
+#endif
+
+/*
+ * Convert linear offset to page directory pointer index
+ */
+#if PAE
+#define lin2pdpnum(a) (((a) >> PDPSHIFT) & PDPMASK)
+#endif
+
+/*
+ * Convert page descriptor index to linear address
+ */
+#define pdenum2lin(a) ((vm_offset_t)(a) << PDESHIFT)
+
+#if PAE
+#ifdef __x86_64__
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l4num) << L4SHIFT) + \
+ ((vm_offset_t)(l3num) << PDPSHIFT) + \
+ ((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#else /* __x86_64__ */
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l3num) << PDPSHIFT) + \
+ ((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#endif
+#else /* PAE */
+#define pagenum2lin(l4num, l3num, l2num, l1num) \
+ (((vm_offset_t)(l2num) << PDESHIFT) + \
+ ((vm_offset_t)(l1num) << PTESHIFT))
+#endif
+
+
+/*
+ * Convert linear offset to page table index
+ */
+#define ptenum(a) (((a) >> PTESHIFT) & PTEMASK)
+
+#define NPTES (intel_ptob(1)/sizeof(pt_entry_t))
+#define NPDES (PDPNUM * (intel_ptob(1)/sizeof(pt_entry_t)))
+
+/*
+ * Hardware pte bit definitions (to be used directly on the ptes
+ * without using the bit fields).
+ */
+
+#define INTEL_PTE_VALID 0x00000001
+#define INTEL_PTE_WRITE 0x00000002
+#define INTEL_PTE_USER 0x00000004
+#define INTEL_PTE_WTHRU 0x00000008
+#define INTEL_PTE_NCACHE 0x00000010
+#define INTEL_PTE_REF 0x00000020
+#define INTEL_PTE_MOD 0x00000040
+#define INTEL_PTE_PS 0x00000080
+#ifdef MACH_PV_PAGETABLES
+/* Not supported */
+#define INTEL_PTE_GLOBAL 0x00000000
+#else /* MACH_PV_PAGETABLES */
+#define INTEL_PTE_GLOBAL 0x00000100
+#endif /* MACH_PV_PAGETABLES */
+#define INTEL_PTE_WIRED 0x00000200
+#ifdef PAE
+#ifdef __x86_64__
+#define INTEL_PTE_PFN 0xfffffffffffff000ULL
+#else /* __x86_64__ */
+#define INTEL_PTE_PFN 0x00007ffffffff000ULL
+#endif/* __x86_64__ */
+#else
+#define INTEL_PTE_PFN 0xfffff000
+#endif
+
+#define pa_to_pte(a) ((a) & INTEL_PTE_PFN)
+#ifdef MACH_PSEUDO_PHYS
+#define pte_to_pa(p) ma_to_pa((p) & INTEL_PTE_PFN)
+#else /* MACH_PSEUDO_PHYS */
+#define pte_to_pa(p) ((p) & INTEL_PTE_PFN)
+#endif /* MACH_PSEUDO_PHYS */
+#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
+
+/*
+ * Convert page table entry to kernel virtual address
+ */
+#define ptetokv(a) (phystokv(pte_to_pa(a)))
+
+#ifndef __ASSEMBLER__
+typedef volatile long cpu_set; /* set of CPUs - must be <= 32 */
+ /* changed by other processors */
+
+struct pmap {
+#if ! PAE
+ pt_entry_t *dirbase; /* page directory table */
+#else /* PAE */
+#ifdef __x86_64__
+ pt_entry_t *l4base; /* l4 table */
+#ifdef MACH_HYP
+ pt_entry_t *user_l4base; /* Userland l4 table */
+ pt_entry_t *user_pdpbase; /* Userland l4 table */
+#endif /* MACH_HYP */
+#else /* x86_64 */
+ pt_entry_t *pdpbase; /* page directory pointer table */
+#endif /* x86_64 */
+#endif /* PAE */
+ int ref_count; /* reference count */
+ decl_simple_lock_data(,lock)
+ /* lock on map */
+ struct pmap_statistics stats; /* map statistics */
+ cpu_set cpus_using; /* bitmap of cpus using pmap */
+};
+
+typedef struct pmap *pmap_t;
+
+#define PMAP_NULL ((pmap_t) 0)
+
+#ifdef MACH_PV_PAGETABLES
+extern void pmap_set_page_readwrite(void *addr);
+extern void pmap_set_page_readonly(void *addr);
+extern void pmap_set_page_readonly_init(void *addr);
+extern void pmap_map_mfn(void *addr, unsigned long mfn);
+extern void pmap_clear_bootstrap_pagetable(pt_entry_t *addr);
+#endif /* MACH_PV_PAGETABLES */
+
+#if PAE
+#ifdef __x86_64__
+/* TODO: support PCID */
+#ifdef MACH_HYP
+#define set_pmap(pmap) \
+ MACRO_BEGIN \
+ set_cr3(kvtophys((vm_offset_t)(pmap)->l4base)); \
+ if (pmap->user_l4base) \
+ if (!hyp_set_user_cr3(kvtophys((vm_offset_t)(pmap)->user_l4base))) \
+ panic("set_user_cr3"); \
+ MACRO_END
+#else /* MACH_HYP */
+#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->l4base))
+#endif /* MACH_HYP */
+#else /* x86_64 */
+#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->pdpbase))
+#endif /* x86_64 */
+#else /* PAE */
+#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->dirbase))
+#endif /* PAE */
+
+typedef struct {
+ pt_entry_t *entry;
+ vm_offset_t vaddr;
+} pmap_mapwindow_t;
+
+extern pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry);
+extern void pmap_put_mapwindow(pmap_mapwindow_t *map);
+
+#define PMAP_NMAPWINDOWS 2 /* Per CPU */
+
+#if NCPUS > 1
+/*
+ * List of cpus that are actively using mapped memory. Any
+ * pmap update operation must wait for all cpus in this list.
+ * Update operations must still be queued to cpus not in this
+ * list.
+ */
+extern cpu_set cpus_active;
+
+/*
+ * List of cpus that are idle, but still operating, and will want
+ * to see any kernel pmap updates when they become active.
+ */
+extern cpu_set cpus_idle;
+
+/*
+ * Quick test for pmap update requests.
+ */
+extern volatile
+boolean_t cpu_update_needed[NCPUS];
+
+/*
+ * External declarations for PMAP_ACTIVATE.
+ */
+
+void process_pmap_updates(pmap_t);
+extern pmap_t kernel_pmap;
+
+#endif /* NCPUS > 1 */
+
+void pmap_update_interrupt(void);
+
+/*
+ * Machine dependent routines that are used only for i386/i486.
+ */
+
+pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
+
+/*
+ * Macros for speed.
+ */
+
+#if NCPUS > 1
+
+/*
+ * For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
+ * fields to control TLB invalidation on other CPUS.
+ */
+
+#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+ \
+ /* \
+ * Let pmap updates proceed while we wait for this pmap. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_active); \
+ \
+ /* \
+ * Lock the pmap to put this cpu in its active set. \
+ * Wait for updates here. \
+ */ \
+ simple_lock(&kernel_pmap->lock); \
+ \
+ /* \
+ * Process invalidate requests for the kernel pmap. \
+ */ \
+ if (cpu_update_needed[(my_cpu)]) \
+ process_pmap_updates(kernel_pmap); \
+ \
+ /* \
+ * Mark that this cpu is using the pmap. \
+ */ \
+ i_bit_set((my_cpu), &kernel_pmap->cpus_using); \
+ \
+ /* \
+ * Mark this cpu active - IPL will be lowered by \
+ * load_context(). \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ \
+ simple_unlock(&kernel_pmap->lock); \
+}
+
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+ /* \
+ * Mark pmap no longer in use by this cpu even if \
+ * pmap is locked against updates. \
+ */ \
+ i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \
+}
+
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+ pmap_t tpmap = (pmap); \
+ \
+ if (tpmap == kernel_pmap) { \
+ /* \
+ * If this is the kernel pmap, switch to its page tables. \
+ */ \
+ set_pmap(tpmap); \
+ } \
+ else { \
+ /* \
+ * Let pmap updates proceed while we wait for this pmap. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_active); \
+ \
+ /* \
+ * Lock the pmap to put this cpu in its active set. \
+ * Wait for updates here. \
+ */ \
+ simple_lock(&tpmap->lock); \
+ \
+ /* \
+ * No need to invalidate the TLB - the entire user pmap \
+ * will be invalidated by reloading dirbase. \
+ */ \
+ set_pmap(tpmap); \
+ \
+ /* \
+ * Mark that this cpu is using the pmap. \
+ */ \
+ i_bit_set((my_cpu), &tpmap->cpus_using); \
+ \
+ /* \
+ * Mark this cpu active - IPL will be lowered by \
+ * load_context(). \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ \
+ simple_unlock(&tpmap->lock); \
+ } \
+}
+
+#define PMAP_DEACTIVATE_USER(pmap, thread, my_cpu) { \
+ pmap_t tpmap = (pmap); \
+ \
+ /* \
+ * Do nothing if this is the kernel pmap. \
+ */ \
+ if (tpmap != kernel_pmap) { \
+ /* \
+ * Mark pmap no longer in use by this cpu even if \
+ * pmap is locked against updates. \
+ */ \
+ i_bit_clear((my_cpu), &(pmap)->cpus_using); \
+ } \
+}
+
+#define MARK_CPU_IDLE(my_cpu) { \
+ /* \
+ * Mark this cpu idle, and remove it from the active set, \
+ * since it is not actively using any pmap. Signal_cpus \
+ * will notice that it is idle, and avoid signaling it, \
+ * but will queue the update request for when the cpu \
+ * becomes active. \
+ */ \
+ int s = splvm(); \
+ i_bit_set((my_cpu), &cpus_idle); \
+ i_bit_clear((my_cpu), &cpus_active); \
+ splx(s); \
+}
+
+#define MARK_CPU_ACTIVE(my_cpu) { \
+ \
+ int s = splvm(); \
+ /* \
+ * If a kernel_pmap update was requested while this cpu \
+ * was idle, process it as if we got the interrupt. \
+ * Before doing so, remove this cpu from the idle set. \
+ * Since we do not grab any pmap locks while we flush \
+ * our TLB, another cpu may start an update operation \
+ * before we finish. Removing this cpu from the idle \
+ * set assures that we will receive another update \
+ * interrupt if this happens. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_idle); \
+ __sync_synchronize(); \
+ \
+ if (cpu_update_needed[(my_cpu)]) \
+ pmap_update_interrupt(); \
+ \
+ /* \
+ * Mark that this cpu is now active. \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ splx(s); \
+}
+
+#else /* NCPUS > 1 */
+
+/*
+ * With only one CPU, we just have to indicate whether the pmap is
+ * in use.
+ */
+
+#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+ (void) (my_cpu); \
+ kernel_pmap->cpus_using = TRUE; \
+}
+
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+ (void) (my_cpu); \
+ kernel_pmap->cpus_using = FALSE; \
+}
+
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+ pmap_t tpmap = (pmap); \
+ (void) (th); \
+ (void) (my_cpu); \
+ \
+ set_pmap(tpmap); \
+ if (tpmap != kernel_pmap) { \
+ tpmap->cpus_using = TRUE; \
+ } \
+}
+
+#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) { \
+ (void) (thread); \
+ (void) (cpu); \
+ if ((pmap) != kernel_pmap) \
+ (pmap)->cpus_using = FALSE; \
+}
+
+#endif /* NCPUS > 1 */
+
+#define PMAP_CONTEXT(pmap, thread)
+
+#define pmap_kernel() (kernel_pmap)
+#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
+#define pmap_phys_address(frame) ((intel_ptob((phys_addr_t) frame)))
+#define pmap_phys_to_frame(phys) ((int) (intel_btop(phys)))
+#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
+#define pmap_attribute(pmap,addr,size,attr,value) \
+ (KERN_INVALID_ADDRESS)
+
+extern pt_entry_t *kernel_page_dir;
+
+extern vm_offset_t kernel_virtual_start;
+extern vm_offset_t kernel_virtual_end;
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ * Allocate the kernel page directory and page tables,
+ * and direct-map all physical memory.
+ * Called with mapping off.
+ */
+extern void pmap_bootstrap(void);
+
+extern void pmap_set_page_dir(void);
+extern void pmap_make_temporary_mapping(void);
+extern void pmap_remove_temporary_mapping(void);
+
+extern void pmap_unmap_page_zero (void);
+
+/*
+ * pmap_zero_page zeros the specified (machine independent) page.
+ */
+extern void pmap_zero_page (phys_addr_t);
+
+/*
+ * pmap_copy_page copies the specified (machine independent) pages.
+ */
+extern void pmap_copy_page (phys_addr_t, phys_addr_t);
+
+/*
+ * copy_to_phys(src_addr_v, dst_addr_p, count)
+ *
+ * Copy virtual memory to physical memory
+ */
+extern void
+copy_to_phys(
+ vm_offset_t src_addr_v,
+ phys_addr_t dst_addr_p,
+ int count);
+
+/*
+ * copy_from_phys(src_addr_p, dst_addr_v, count)
+ *
+ * Copy physical memory to virtual memory. The virtual memory
+ * is assumed to be present (e.g. the buffer pool).
+ */
+extern void
+copy_from_phys(
+ phys_addr_t src_addr_p,
+ vm_offset_t dst_addr_v,
+ int count);
+
+/*
+ * kvtophys(addr)
+ *
+ * Convert a kernel virtual address to a physical address
+ */
+extern phys_addr_t kvtophys (vm_offset_t);
+
+#if NCPUS > 1
+void signal_cpus(
+ cpu_set use_list,
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end);
+#endif /* NCPUS > 1 */
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _PMAP_MACHINE_ */
diff --git a/riscv/intel/read_fault.c b/riscv/intel/read_fault.c
new file mode 100644
index 0000000..0b79e3d
--- /dev/null
+++ b/riscv/intel/read_fault.c
@@ -0,0 +1,178 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#include <vm/vm_fault.h>
+#include <mach/kern_return.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+
+#include <kern/macros.h>
+
+#if !(__i486__ || __i586__ || __i686__)
+/*
+ * Expansion of vm_fault for read fault in kernel mode.
+ * Must enter the mapping as writable, since the i386
+ * ignores write protection in kernel mode.
+ */
+kern_return_t
+intel_read_fault(
+ vm_map_t map,
+ vm_offset_t vaddr)
+{
+ vm_map_version_t version; /* Map version for
+ verification */
+ vm_object_t object; /* Top-level object */
+ vm_offset_t offset; /* Top-level offset */
+ vm_prot_t prot; /* Protection for mapping */
+ vm_page_t result_page; /* Result of vm_fault_page */
+ vm_page_t top_page; /* Placeholder page */
+ boolean_t wired; /* Is map region wired? */
+ kern_return_t result;
+ vm_page_t m;
+
+ RetryFault:
+
+ /*
+ * Find the backing store object and offset into it
+ * to begin search.
+ */
+ result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
+ &object, &offset, &prot, &wired);
+ if (result != KERN_SUCCESS)
+ return (result);
+
+ /*
+ * Make a reference to this object to prevent its
+ * disposal while we are playing with it.
+ */
+ assert(object->ref_count > 0);
+ object->ref_count++;
+ vm_object_paging_begin(object);
+
+ result = vm_fault_page(object, offset, VM_PROT_READ, FALSE, TRUE,
+ &prot, &result_page, &top_page,
+ FALSE, (void (*)()) 0);
+
+ if (result != VM_FAULT_SUCCESS) {
+ vm_object_deallocate(object);
+
+ switch (result) {
+ case VM_FAULT_RETRY:
+ goto RetryFault;
+ case VM_FAULT_INTERRUPTED:
+ return (KERN_SUCCESS);
+ case VM_FAULT_MEMORY_SHORTAGE:
+ VM_PAGE_WAIT((void (*)()) 0);
+ goto RetryFault;
+ case VM_FAULT_FICTITIOUS_SHORTAGE:
+ vm_page_more_fictitious();
+ goto RetryFault;
+ case VM_FAULT_MEMORY_ERROR:
+ return (KERN_MEMORY_ERROR);
+ }
+ }
+
+ m = result_page;
+
+ /*
+ * How to clean up the result of vm_fault_page. This
+ * happens whether the mapping is entered or not.
+ */
+
+#define UNLOCK_AND_DEALLOCATE \
+ MACRO_BEGIN \
+ vm_fault_cleanup(m->object, top_page); \
+ vm_object_deallocate(object); \
+ MACRO_END
+
+ /*
+ * What to do with the resulting page from vm_fault_page
+ * if it doesn't get entered into the physical map:
+ */
+
+#define RELEASE_PAGE(m) \
+ MACRO_BEGIN \
+ PAGE_WAKEUP_DONE(m); \
+ vm_page_lock_queues(); \
+ if (!m->active && !m->inactive) \
+ vm_page_activate(m); \
+ vm_page_unlock_queues(); \
+ MACRO_END
+
+ /*
+ * We must verify that the maps have not changed.
+ */
+ vm_object_unlock(m->object);
+ while (!vm_map_verify(map, &version)) {
+ vm_object_t retry_object;
+ vm_offset_t retry_offset;
+ vm_prot_t retry_prot;
+
+ result = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
+ &retry_object, &retry_offset, &retry_prot,
+ &wired);
+ if (result != KERN_SUCCESS) {
+ vm_object_lock(m->object);
+ RELEASE_PAGE(m);
+ UNLOCK_AND_DEALLOCATE;
+ return (result);
+ }
+
+ vm_object_unlock(retry_object);
+
+ if (retry_object != object || retry_offset != offset) {
+ vm_object_lock(m->object);
+ RELEASE_PAGE(m);
+ UNLOCK_AND_DEALLOCATE;
+ goto RetryFault;
+ }
+ }
+
+ /*
+ * Put the page in the physical map.
+ */
+ PMAP_ENTER(map->pmap, vaddr, m, VM_PROT_READ|VM_PROT_WRITE, wired);
+
+ vm_object_lock(m->object);
+ vm_page_lock_queues();
+ if (!m->active && !m->inactive)
+ vm_page_activate(m);
+ m->reference = TRUE;
+ vm_page_unlock_queues();
+
+ vm_map_verify_done(map, &version);
+ PAGE_WAKEUP_DONE(m);
+
+ UNLOCK_AND_DEALLOCATE;
+
+#undef UNLOCK_AND_DEALLOCATE
+#undef RELEASE_PAGE
+
+ return (KERN_SUCCESS);
+}
+#endif
diff --git a/riscv/intel/read_fault.h b/riscv/intel/read_fault.h
new file mode 100644
index 0000000..8aa3f03
--- /dev/null
+++ b/riscv/intel/read_fault.h
@@ -0,0 +1,35 @@
+/*
+ * Kernel read_fault on i386 functions
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Kernel read_fault on i386 functions.
+ *
+ */
+
+#ifndef _READ_FAULT_H_
+#define _READ_FAULT_H_
+
+#include <mach/std_types.h>
+
+extern kern_return_t intel_read_fault(
+ vm_map_t map,
+ vm_offset_t vaddr);
+
+#endif /* _READ_FAULT_H_ */
diff --git a/riscv/ldscript b/riscv/ldscript
new file mode 100644
index 0000000..195b4a3
--- /dev/null
+++ b/riscv/ldscript
@@ -0,0 +1,213 @@
+/* Default linker script, for normal executables */
+OUTPUT_FORMAT("elf64-littleriscv", "elf64-littleriscv",
+ "elf64-littleriscv")
+OUTPUT_ARCH(riscv)
+
+MEMORY
+{
+ /* qemu-system-risc64 virt machine */
+ RAM (rwx) : ORIGIN = 0x80000000, LENGTH = 128M
+}
+
+ENTRY(_start)
+SECTIONS
+{
+ /*
+ * There are specific requirements about entry points, so we have it
+ * configurable via `_START': `.text' will begin there and `.text.start' will
+ * be first in there. See also `i386/i386at/boothdr.S' and
+ * `gnumach_LINKFLAGS' in `i386/Makefrag.am'.
+ */
+ PROVIDE(__stack_top = ORIGIN(RAM) + LENGTH(RAM));
+ . = _START;
+ .text :
+ AT (_START_MAP)
+ {
+ *(.startup)
+ *(.text.start)
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ *(.text.unlikely .text.*_unlikely)
+ KEEP (*(.text.*personality*))
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ }
+ .init :
+ {
+ KEEP (*(.init))
+ }
+ .fini :
+ {
+ KEEP (*(.fini))
+ }
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = .);
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
+ .rel.fini : { *(.rel.fini) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
+ .rel.data.rel.ro : { *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*) }
+ .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
+ .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+ .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+ .rel.ifunc : { *(.rel.ifunc) }
+ .rel.plt :
+ {
+ *(.rel.plt)
+ PROVIDE_HIDDEN (__rel_iplt_start = .);
+ *(.rel.iplt)
+ PROVIDE_HIDDEN (__rel_iplt_end = .);
+ }
+ .plt : { *(.plt) *(.iplt) }
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table
+ .gcc_except_table.*) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ .preinit_array :
+ {
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ KEEP (*(.preinit_array))
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ }
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(SORT(.fini_array.*)))
+ KEEP (*(.fini_array))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin?.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin?.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got) *(.igot) }
+ . = DATA_SEGMENT_RELRO_END (12, .);
+ .got.plt : { *(.got.plt) *(.igot.plt) }
+ .data :
+ {
+ *(.data .data.* .gnu.linkonce.d.*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ __bss_start = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections.
+ FIXME: Why do we need it? When there is no .bss section, we don't
+ pad the .data section. */
+ . = ALIGN(. != 0 ? 32 / 8 : 1);
+ }
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ /* TODO: fix */
+ . += 4096;
+ PROVIDE(__global_pointer$ = .);
+ _end = .; PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) }
+}
diff --git a/riscv/riscv/ast.h b/riscv/riscv/ast.h
new file mode 100644
index 0000000..7afaa41
--- /dev/null
+++ b/riscv/riscv/ast.h
@@ -0,0 +1,47 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_AST_H_
+#define _I386_AST_H_
+
+/*
+ * Machine-dependent AST file for machines with no hardware AST support.
+ *
+ * For the I386, we define AST_I386_FP to handle delayed
+ * floating-point exceptions. The FPU may interrupt on errors
+ * while the user is not running (in kernel or other thread running).
+ */
+
+#define AST_I386_FP 0x80000000
+
+#define MACHINE_AST_PER_THREAD AST_I386_FP
+
+
+/* Chain to the machine-independent header. */
+/* #include_next "ast.h" */
+
+
+#endif /* _I386_AST_H_ */
diff --git a/riscv/riscv/boothdr.S b/riscv/riscv/boothdr.S
new file mode 100644
index 0000000..7c24c47
--- /dev/null
+++ b/riscv/riscv/boothdr.S
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+
+#include <asm/asm-offsets.h>
+#include <asm/asm.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/csr.h>
+#include <asm/cpu_ops_sbi.h>
+#include <asm/hwcap.h>
+#include <asm/image.h>
+#include <asm/scs.h>
+#include <asm/xip_fixup.h>
+#include "const.h"
+
+.section .startup, "ax"
+.global _start
+
+_start:
+ j _start_kernel
+ /* reserved */
+ .word 0
+
+_start_kernel:
+ /* Mask all interrupts */
+ csrw CSR_IE, zero
+ csrw CSR_IP, zero
+
+ /* flush the instruction cache */
+ fence.i
+
+ /* Reset all registers except ra, a0, a1 */
+ call reset_regs
+
+ // testing
+ .cfi_startproc
+ .cfi_undefined ra
+ .option push
+ .option norelax
+ la gp, __global_pointer$
+ .option pop
+ la sp, __stack_top
+ jal zero, c_boot_entry
+ .cfi_endproc
+ // testing
+
+ /*
+ * Setup a PMP to permit access to all of memory. Some machines may
+ * not implement PMPs, so we set up a quick trap handler to just skip
+ * touching the PMPs on any trap.
+ */
+ la a0, .Lpmp_done
+ csrw CSR_TVEC, a0
+
+ li a0, -1
+ csrw CSR_PMPADDR0, a0
+ li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
+ csrw CSR_PMPCFG0, a0
+
+
+.align 2
+
+.Lpmp_done:
+ /*
+ * The hartid in a0 is expected later on, and we have no firmware
+ * to hand it to us.
+ */
+ csrr a0, CSR_MHARTID
+
+ /* Load the global pointer */
+ load_global_pointer
+
+ /*
+ * Disable FPU & VECTOR to detect illegal usage of
+ * floating point or vector in kernel space
+ */
+ li t0, SR_FS_VS
+ csrc CSR_STATUS, t0
+
+#ifdef CONFIG_RISCV_BOOT_SPINWAIT
+ li t0, CONFIG_NR_CPUS
+ blt a0, t0, .Lgood_cores
+ tail .Lsecondary_park
+.Lgood_cores:
+ /* hart_lottery in flash contains a magic number */
+ la a3, hart_lottery
+ mv a2, a3
+ XIP_FIXUP_OFFSET a2
+ XIP_FIXUP_FLASH_OFFSET a3
+ lw t1, (a3)
+ amoswap.w t0, t1, (a2)
+ /* first time here if hart_lottery in RAM is not set */
+ beq t0, t1, .Lsecondary_start
+#endif /* CONFIG_RISCV_BOOT_SPINWAIT */
+
+ /* Clear BSS for flat non-ELF images */
+ /* __bss_start */
+ la a3, _start
+ /* __bss_stop */
+ la a4, _end
+ ble a4, a3, .Lclear_bss_done
+.Lclear_bss:
+ REG_S zero, (a3)
+ add a3, a3, RISCV_SZPTR
+ blt a3, a4, .Lclear_bss
+.Lclear_bss_done:
+ la a2, boot_cpu_hartid
+ XIP_FIXUP_OFFSET a2
+ REG_S a0, (a2)
+
+ /* Initialize page tables and relocate to virtual addresses */
+ //la tp, init_task
+ //la sp, init_thread_union + THREAD_SIZE
+ //XIP_FIXUP_OFFSET sp
+ //addi sp, sp, -PT_SIZE_ON_STACK
+ //scs_load_init_stack
+
+
+reset_regs:
+ li sp, 0
+ li gp, 0
+ li tp, 0
+ li t0, 0
+ li t1, 0
+ li t2, 0
+ li s0, 0
+ li s1, 0
+ li a2, 0
+ li a3, 0
+ li a4, 0
+ li a5, 0
+ li a6, 0
+ li a7, 0
+ li s2, 0
+ li s3, 0
+ li s4, 0
+ li s5, 0
+ li s6, 0
+ li s7, 0
+ li s8, 0
+ li s9, 0
+ li s10, 0
+ li s11, 0
+ li t3, 0
+ li t4, 0
+ li t5, 0
+ li t6, 0
+ csrw CSR_SCRATCH, 0
+
+#ifdef CONFIG_FPU
+ csrr t0, CSR_MISA
+ andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
+ beqz t0, .Lreset_regs_done_fpu
+
+ li t1, SR_FS
+ csrs CSR_STATUS, t1
+ fmv.s.x f0, zero
+ fmv.s.x f1, zero
+ fmv.s.x f2, zero
+ fmv.s.x f3, zero
+ fmv.s.x f4, zero
+ fmv.s.x f5, zero
+ fmv.s.x f6, zero
+ fmv.s.x f7, zero
+ fmv.s.x f8, zero
+ fmv.s.x f9, zero
+ fmv.s.x f10, zero
+ fmv.s.x f11, zero
+ fmv.s.x f12, zero
+ fmv.s.x f13, zero
+ fmv.s.x f14, zero
+ fmv.s.x f15, zero
+ fmv.s.x f16, zero
+ fmv.s.x f17, zero
+ fmv.s.x f18, zero
+ fmv.s.x f19, zero
+ fmv.s.x f20, zero
+ fmv.s.x f21, zero
+ fmv.s.x f22, zero
+ fmv.s.x f23, zero
+ fmv.s.x f24, zero
+ fmv.s.x f25, zero
+ fmv.s.x f26, zero
+ fmv.s.x f27, zero
+ fmv.s.x f28, zero
+ fmv.s.x f29, zero
+ fmv.s.x f30, zero
+ fmv.s.x f31, zero
+ csrw fcsr, 0
+ /* note that the caller must clear SR_FS */
+.Lreset_regs_done_fpu:
+#endif /* CONFIG_FPU */
+
+#ifdef CONFIG_RISCV_ISA_V
+ csrr t0, CSR_MISA
+ li t1, COMPAT_HWCAP_ISA_V
+ and t0, t0, t1
+ beqz t0, .Lreset_regs_done_vector
+
+ /*
+ * Clear vector registers and reset vcsr
+ * VLMAX has a defined value, VLEN is a constant,
+ * and this form of vsetvli is defined to set vl to VLMAX.
+ */
+ li t1, SR_VS
+ csrs CSR_STATUS, t1
+ csrs CSR_VCSR, x0
+ vsetvli t1, x0, e8, m8, ta, ma
+ vmv.v.i v0, 0
+ vmv.v.i v8, 0
+ vmv.v.i v16, 0
+ vmv.v.i v24, 0
+ /* note that the caller must clear SR_VS */
+ .Lreset_regs_done_vector:
+#endif /* CONFIG_RISCV_ISA_V */
+ ret
+
+.end
diff --git a/riscv/riscv/const.h b/riscv/riscv/const.h
new file mode 100644
index 0000000..f589334
--- /dev/null
+++ b/riscv/riscv/const.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* const.h: Macros for dealing with constants. */
+
+//#define CONFIG_FPU
+//#define CONFIG_RISCV_ISA_V
+
+#ifndef _UAPI_LINUX_CONST_H
+#define _UAPI_LINUX_CONST_H
+
+
+/* Some constant macros are used in both assembler and
+ * C code. Therefore we cannot annotate them always with
+ * 'UL' and other type specifiers unilaterally. We
+ * use the following macros to deal with this.
+ *
+ * Similarly, _AT() will cast an expression with a type in C, but
+ * leave it unchanged in asm.
+ */
+
+#ifdef __ASSEMBLY__
+#define _AC(X,Y) X
+#define _AT(T,X) X
+#else
+#define __AC(X,Y) (X##Y)
+#define _AC(X,Y) __AC(X,Y)
+#define _AT(T,X) ((T)(X))
+#endif
+
+#define _UL(x) (_AC(x, UL))
+#define _ULL(x) (_AC(x, ULL))
+
+#define _BITUL(x) (_UL(1) << (x))
+#define _BITULL(x) (_ULL(1) << (x))
+
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+#endif /* _UAPI_LINUX_CONST_H */
+
+#define CONFIG_NR_CPUS 2
+#define CONFIG_THREAD_SIZE_ORDER 2
diff --git a/riscv/riscv/copy_user.h b/riscv/riscv/copy_user.h
new file mode 100644
index 0000000..3d1c727
--- /dev/null
+++ b/riscv/riscv/copy_user.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2023 Free Software Foundation
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef COPY_USER_H
+#define COPY_USER_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <machine/locore.h>
+#include <mach/message.h>
+
+/*
+ * The copyin_32to64() and copyout_64to32() routines are meant for data types
+ * that have different size in kernel and user space. They should be independent
+ * of endianness and hopefully can be reused in the future on other archs.
+ * These types are e.g.:
+ * - port names vs port pointers, on a 64-bit kernel
+ * - memory addresses, on a 64-bit kernel and 32-bit user
+ */
+
+static inline int copyin_32to64(const uint32_t *uaddr, uint64_t *kaddr)
+{
+ uint32_t rkaddr;
+ int ret;
+ ret = copyin(uaddr, &rkaddr, sizeof(uint32_t));
+ if (ret)
+ return ret;
+ *kaddr = rkaddr;
+ return 0;
+}
+
+static inline int copyout_64to32(const uint64_t *kaddr, uint32_t *uaddr)
+{
+ uint32_t rkaddr=*kaddr;
+ return copyout(&rkaddr, uaddr, sizeof(uint32_t));
+}
+
+static inline int copyin_address(const rpc_vm_offset_t *uaddr, vm_offset_t *kaddr)
+{
+#ifdef USER32
+ return copyin_32to64(uaddr, kaddr);
+#else /* USER32 */
+ return copyin(uaddr, kaddr, sizeof(*uaddr));
+#endif /* USER32 */
+}
+
+static inline int copyout_address(const vm_offset_t *kaddr, rpc_vm_offset_t *uaddr)
+{
+#ifdef USER32
+ return copyout_64to32(kaddr, uaddr);
+#else /* USER32 */
+ return copyout(kaddr, uaddr, sizeof(*kaddr));
+#endif /* USER32 */
+}
+
+static inline int copyin_port(const mach_port_name_t *uaddr, mach_port_t *kaddr)
+{
+#ifdef __x86_64__
+ return copyin_32to64(uaddr, kaddr);
+#else /* __x86_64__ */
+ return copyin(uaddr, kaddr, sizeof(*uaddr));
+#endif /* __x86_64__ */
+}
+
+static inline int copyout_port(const mach_port_t *kaddr, mach_port_name_t *uaddr)
+{
+#ifdef __x86_64__
+ return copyout_64to32(kaddr, uaddr);
+#else /* __x86_64__ */
+ return copyout(kaddr, uaddr, sizeof(*kaddr));
+#endif /* __x86_64__ */
+}
+
+#if defined(__x86_64__) && defined(USER32)
+/* For 32 bit userland, kernel and user land messages are not the same size. */
+size_t msg_usize(const mach_msg_header_t *kmsg);
+#else
+static inline size_t msg_usize(const mach_msg_header_t *kmsg)
+{
+ return kmsg->msgh_size;
+}
+#endif /* __x86_64__ && USER32 */
+
+#endif /* COPY_USER_H */
diff --git a/riscv/riscv/cpu_number.h b/riscv/riscv/cpu_number.h
new file mode 100644
index 0000000..fdd2479
--- /dev/null
+++ b/riscv/riscv/cpu_number.h
@@ -0,0 +1,119 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-dependent definitions for cpu identification.
+ *
+ */
+#ifndef _RISCV_CPU_NUMBER_H_
+#define _RISCV_CPU_NUMBER_H_
+
+#if NCPUS > 1
+
+#define MY(stm) %gs:PERCPU_##stm
+
+#ifdef __i386__
+#define CX(addr, reg) addr(,reg,4)
+#endif
+#ifdef __x86_64__
+#define CX(addr, reg) addr(,reg,8)
+#endif
+
+#define CPU_NUMBER_NO_STACK(reg) \
+ movl %cs:lapic, reg ;\
+ movl %cs:APIC_ID(reg), reg ;\
+ shrl $24, reg ;\
+ movl %cs:CX(cpu_id_lut, reg), reg ;\
+
+#ifdef __i386__
+/* Never call CPU_NUMBER_NO_GS(%esi) */
+#define CPU_NUMBER_NO_GS(reg) \
+ pushl %esi ;\
+ pushl %eax ;\
+ pushl %ebx ;\
+ pushl %ecx ;\
+ pushl %edx ;\
+ movl $1, %eax ;\
+ cpuid ;\
+ shrl $24, %ebx ;\
+ movl %cs:CX(cpu_id_lut, %ebx), %esi ;\
+ popl %edx ;\
+ popl %ecx ;\
+ popl %ebx ;\
+ popl %eax ;\
+ movl %esi, reg ;\
+ popl %esi
+#endif
+#ifdef __x86_64__
+/* Never call CPU_NUMBER_NO_GS(%esi) */
+#define CPU_NUMBER_NO_GS(reg) \
+ pushq %rsi ;\
+ pushq %rax ;\
+ pushq %rbx ;\
+ pushq %rcx ;\
+ pushq %rdx ;\
+ movl $1, %eax ;\
+ cpuid ;\
+ shrl $24, %ebx ;\
+ movl %cs:CX(cpu_id_lut, %ebx), %esi ;\
+ popq %rdx ;\
+ popq %rcx ;\
+ popq %rbx ;\
+ popq %rax ;\
+ movl %esi, reg ;\
+ popq %rsi
+#endif
+
+#define CPU_NUMBER(reg) \
+ movl MY(CPU_ID), reg;
+
+#ifndef __ASSEMBLER__
+#include <kern/cpu_number.h>
+#include <riscv/apic.h>
+#include <riscv/percpu.h>
+
+static inline int cpu_number_slow(void)
+{
+ return cpu_id_lut[apic_get_current_cpu()];
+}
+
+static inline int cpu_number(void)
+{
+ return percpu_get(int, cpu_id);
+}
+#endif
+
+#else /* NCPUS == 1 */
+
+#define MY(stm) (percpu_array + PERCPU_##stm)
+
+#define CPU_NUMBER_NO_STACK(reg)
+#define CPU_NUMBER_NO_GS(reg)
+#define CPU_NUMBER(reg)
+#define CX(addr,reg) addr
+
+#endif /* NCPUS == 1 */
+
+#endif /* _RISCV_CPU_NUMBER_H_ */
diff --git a/riscv/riscv/db_machdep.h b/riscv/riscv/db_machdep.h
new file mode 100644
index 0000000..40b8333
--- /dev/null
+++ b/riscv/riscv/db_machdep.h
@@ -0,0 +1,105 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_DB_MACHDEP_H_
+#define _I386_DB_MACHDEP_H_
+
+/*
+ * Machine-dependent defines for new kernel debugger.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <mach/machine/vm_param.h>
+#include <mach/machine/eflags.h>
+#include <riscv/thread.h> /* for thread_status */
+#include <riscv/trap.h>
+
+typedef vm_offset_t db_addr_t; /* address - unsigned */
+typedef long db_expr_t; /* expression - signed */
+
+typedef struct i386_saved_state db_regs_t;
+extern db_regs_t ddb_regs; /* register state */
+#define DDB_REGS (&ddb_regs)
+#define SAVE_DDB_REGS DB_SAVE(db_regs_t, ddb_regs)
+#define RESTORE_DDB_REGS DB_RESTORE(ddb_regs)
+
+#define PC_REGS(regs) ((db_addr_t)(regs)->eip)
+
+#define BKPT_INST 0xcc /* breakpoint instruction */
+#define BKPT_SIZE (1) /* size of breakpoint inst */
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define FIXUP_PC_AFTER_BREAK ddb_regs.eip -= 1;
+
+#define db_clear_single_step(regs) ((regs)->efl &= ~EFL_TF)
+#define db_set_single_step(regs) ((regs)->efl |= EFL_TF)
+
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_INT3)
+#define IS_WATCHPOINT_TRAP(type, code) ((type) == T_WATCHPOINT)
+
+#define I_CALL 0xe8
+#define I_CALLI 0xff
+#define I_RET 0xc3
+#define I_IRET 0xcf
+
+#define inst_trap_return(ins) (((ins)&0xff) == I_IRET)
+#define inst_return(ins) (((ins)&0xff) == I_RET)
+#define inst_call(ins) (((ins)&0xff) == I_CALL || \
+ (((ins)&0xff) == I_CALLI && \
+ ((ins)&0x3800) == 0x1000))
+#define inst_load(ins) 0
+#define inst_store(ins) 0
+
+/* access capability and access macros */
+
+#define DB_ACCESS_LEVEL 2 /* access any space */
+#define DB_CHECK_ACCESS(addr,size,task) \
+ db_check_access(addr,size,task)
+#define DB_PHYS_EQ(task1,addr1,task2,addr2) \
+ db_phys_eq(task1,addr1,task2,addr2)
+#define DB_VALID_KERN_ADDR(addr) \
+ ((addr) >= VM_MIN_KERNEL_ADDRESS && \
+ (addr) < VM_MAX_KERNEL_ADDRESS)
+#define DB_VALID_ADDRESS(addr,user) \
+ ((!(user) && DB_VALID_KERN_ADDR(addr)) || \
+ ((user) && (addr) < VM_MIN_KERNEL_ADDRESS))
+
+/* macros for printing OS server dependent task name */
+
+#define DB_TASK_NAME(task) db_task_name(task)
+#define DB_TASK_NAME_TITLE "COMMAND "
+#define DB_TASK_NAME_LEN 23
+#define DB_NULL_TASK_NAME "? "
+
+/* macro for checking if a thread has used floating-point */
+
+#define db_thread_fp_used(thread) ((thread)->pcb->ims.ifps != 0)
+
+/* only a.out symbol tables */
+
+#define DB_NO_COFF 1
+
+#endif /* _I386_DB_MACHDEP_H_ */
diff --git a/riscv/riscv/io_perm.h b/riscv/riscv/io_perm.h
new file mode 100644
index 0000000..b97cf97
--- /dev/null
+++ b/riscv/riscv/io_perm.h
@@ -0,0 +1,63 @@
+/* Data types for I/O permission bitmap objects.
+
+ Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+
+ Written by Marcus Brinkmann. Glued into GNU Mach by Thomas Schwinge.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef _I386_IO_PERM_H_
+#define _I386_IO_PERM_H_
+
+#include <device/dev_hdr.h>
+#include <ipc/ipc_types.h>
+
+
+/* The highest possible I/O port. */
+#define IOPB_MAX 0xffff
+
+/* The number of bytes needed to hold all permission bits. */
+#define IOPB_BYTES (((IOPB_MAX + 1) + 7) / 8)
+
+/* An offset that points outside of the permission bitmap, used to
+ disable all permission. */
+#define IOPB_INVAL 0x2fff
+
+
+/* The type of an I/O port address. */
+typedef unsigned short io_port_t;
+
+
+struct io_perm
+{
+ /* We use a ``struct device'' for easy management. */
+ struct device device;
+
+ ipc_port_t port;
+
+ io_port_t from, to;
+};
+
+typedef struct io_perm *io_perm_t;
+
+#define IO_PERM_NULL ((io_perm_t) 0)
+
+extern io_perm_t convert_port_to_io_perm (ipc_port_t);
+extern ipc_port_t convert_io_perm_to_port (io_perm_t);
+extern void io_perm_deallocate (io_perm_t);
+
+#endif /* _I386_IO_PERM_H_ */
diff --git a/riscv/riscv/ipl.h b/riscv/riscv/ipl.h
new file mode 100644
index 0000000..6e59b36
--- /dev/null
+++ b/riscv/riscv/ipl.h
@@ -0,0 +1,83 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _I386_IPL_H_
+#define _I386_IPL_H_
+
+#define SPL0 0
+#define SPL1 1
+#define SPL2 2
+#define SPL3 3
+#define SPL4 4
+#define SPL5 5
+#define SPL6 6
+#define SPL7 7
+
+#define SPLPP 5
+#define SPLTTY 6
+#define SPLNI 6
+#define SPLHI 7
+#define IPLHI SPLHI
+
+#define NSPL (SPL7 + 1)
+
+#ifdef KERNEL
+#ifndef __ASSEMBLER__
+#include <machine/machspl.h>
+/* Note that interrupts have varying signatures */
+typedef void (*interrupt_handler_fn)(int);
+extern interrupt_handler_fn ivect[];
+extern int iunit[];
+extern spl_t curr_ipl[NCPUS];
+#endif /* __ASSEMBLER__ */
+#endif /* KERNEL */
+
+#endif /* _I386_IPL_H_ */
diff --git a/riscv/riscv/irq.c b/riscv/riscv/irq.c
new file mode 100644
index 0000000..c73bccb
--- /dev/null
+++ b/riscv/riscv/irq.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 1995 Shantanu Goel
+ * Copyright (C) 2020 Free Software Foundation, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <riscv/irq.h>
+#include <device/intr.h>
+#include <mach/kern_return.h>
+#include <kern/queue.h>
+#include <kern/assert.h>
+#include <machine/machspl.h>
+
+extern queue_head_t main_intr_queue;
+
+static void
+irq_eoi (struct irqdev *dev, int id)
+{
+#ifdef APIC
+ ioapic_irq_eoi (dev->irq[id]);
+#endif
+}
+
+static unsigned int ndisabled_irq[NINTR];
+
+void
+__disable_irq (irq_t irq_nr)
+{
+ assert (irq_nr < NINTR);
+
+ spl_t s = splhigh();
+ ndisabled_irq[irq_nr]++;
+ assert (ndisabled_irq[irq_nr] > 0);
+ if (ndisabled_irq[irq_nr] == 1)
+ mask_irq (irq_nr);
+ splx(s);
+}
+
+void
+__enable_irq (irq_t irq_nr)
+{
+ assert (irq_nr < NINTR);
+
+ spl_t s = splhigh();
+ assert (ndisabled_irq[irq_nr] > 0);
+ ndisabled_irq[irq_nr]--;
+ if (ndisabled_irq[irq_nr] == 0)
+ unmask_irq (irq_nr);
+ splx(s);
+}
+
+struct irqdev irqtab = {
+ "irq", irq_eoi, &main_intr_queue, 0,
+#ifdef APIC
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
+#else
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+#endif
+};
+
diff --git a/riscv/riscv/irq.h b/riscv/riscv/irq.h
new file mode 100644
index 0000000..bdab3ad
--- /dev/null
+++ b/riscv/riscv/irq.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2020 Free Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE FREE SOFTWARE FOUNDATION ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ */
+
+#ifndef _I386_IRQ_H
+#define _I386_IRQ_H
+
+#ifdef APIC
+# include <riscv/apic.h>
+#else
+# include <riscv/pic.h>
+#endif
+
+typedef unsigned int irq_t;
+
+void __enable_irq (irq_t irq);
+void __disable_irq (irq_t irq);
+
+extern struct irqdev irqtab;
+
+#endif
diff --git a/riscv/riscv/locore.h b/riscv/riscv/locore.h
new file mode 100644
index 0000000..374c8cf
--- /dev/null
+++ b/riscv/riscv/locore.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2006, 2011 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _MACHINE_LOCORE_H_
+#define _MACHINE_LOCORE_H_
+
+#include <sys/types.h>
+
+#include <kern/sched_prim.h>
+
+/*
+ * Fault recovery in copyin/copyout routines.
+ */
+struct recovery {
+ vm_offset_t fault_addr;
+ vm_offset_t recover_addr;
+};
+
+extern struct recovery recover_table[];
+extern struct recovery recover_table_end[];
+
+/*
+ * Recovery from Successful fault in copyout does not
+ * return directly - it retries the pte check, since
+ * the 386 ignores write protection in kernel mode.
+ */
+extern struct recovery retry_table[];
+extern struct recovery retry_table_end[];
+
+
+extern int call_continuation (continuation_t continuation);
+
+extern int discover_x86_cpu_type (void);
+
+extern int copyin (const void *userbuf, void *kernelbuf, size_t cn);
+extern int copyinmsg (const void *userbuf, void *kernelbuf, size_t cn, size_t kn);
+extern int copyout (const void *kernelbuf, void *userbuf, size_t cn);
+extern int copyoutmsg (const void *kernelbuf, void *userbuf, size_t cn);
+
+extern int inst_fetch (int eip, int cs);
+
+extern void cpu_shutdown (void);
+
+extern int syscall (void);
+extern int syscall64 (void);
+
+extern unsigned int cpu_features[2];
+
+#define CPU_FEATURE_FPU 0
+#define CPU_FEATURE_VME 1
+#define CPU_FEATURE_DE 2
+#define CPU_FEATURE_PSE 3
+#define CPU_FEATURE_TSC 4
+#define CPU_FEATURE_MSR 5
+#define CPU_FEATURE_PAE 6
+#define CPU_FEATURE_MCE 7
+#define CPU_FEATURE_CX8 8
+#define CPU_FEATURE_APIC 9
+#define CPU_FEATURE_SEP 11
+#define CPU_FEATURE_MTRR 12
+#define CPU_FEATURE_PGE 13
+#define CPU_FEATURE_MCA 14
+#define CPU_FEATURE_CMOV 15
+#define CPU_FEATURE_PAT 16
+#define CPU_FEATURE_PSE_36 17
+#define CPU_FEATURE_PSN 18
+#define CPU_FEATURE_CFLSH 19
+#define CPU_FEATURE_DS 21
+#define CPU_FEATURE_ACPI 22
+#define CPU_FEATURE_MMX 23
+#define CPU_FEATURE_FXSR 24
+#define CPU_FEATURE_SSE 25
+#define CPU_FEATURE_SSE2 26
+#define CPU_FEATURE_SS 27
+#define CPU_FEATURE_HTT 28
+#define CPU_FEATURE_TM 29
+#define CPU_FEATURE_PBE 31
+#define CPU_FEATURE_XSAVE (1*32 + 26)
+
+#define CPU_HAS_FEATURE(feature) (cpu_features[(feature) / 32] & (1 << ((feature) % 32)))
+
+#endif /* _MACHINE__LOCORE_H_ */
+
diff --git a/riscv/riscv/loose_ends.h b/riscv/riscv/loose_ends.h
new file mode 100644
index 0000000..c085527
--- /dev/null
+++ b/riscv/riscv/loose_ends.h
@@ -0,0 +1,33 @@
+/*
+ * Other useful functions?
+ * Copyright (C) 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ * Other useful functions?
+ *
+ */
+
+#ifndef _LOOSE_ENDS_H_
+#define _LOOSE_ENDS_H_
+
+#include <mach/std_types.h>
+
+extern void delay (int n);
+
+#endif /* _LOOSE_ENDS_H_ */
diff --git a/riscv/riscv/mach_param.h b/riscv/riscv/mach_param.h
new file mode 100644
index 0000000..d7d4dee
--- /dev/null
+++ b/riscv/riscv/mach_param.h
@@ -0,0 +1,31 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Machine-dependent parameters for i386.
+ */
+
+#define HZ (100)
+ /* clock tick each 10 ms. */
diff --git a/riscv/riscv/mach_riscv.srv b/riscv/riscv/mach_riscv.srv
new file mode 100644
index 0000000..6023afb
--- /dev/null
+++ b/riscv/riscv/mach_riscv.srv
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#include <mach/machine/mach_riscv.defs>
diff --git a/riscv/riscv/machine_routines.h b/riscv/riscv/machine_routines.h
new file mode 100644
index 0000000..aa41e81
--- /dev/null
+++ b/riscv/riscv/machine_routines.h
@@ -0,0 +1,38 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_MACHINE_ROUTINES_H_
+#define _I386_MACHINE_ROUTINES_H_
+
+/*
+ * The i386 has a set of machine-dependent interfaces.
+ */
+#define MACHINE_SERVER mach_riscv_server
+#define MACHINE_SERVER_HEADER "riscv/riscv/mach_riscv.server.h"
+#define MACHINE_SERVER_ROUTINE mach_riscv_server_routine
+
+#endif /* _I386_MACHINE_ROUTINES_H_ */
+
diff --git a/riscv/riscv/machspl.h b/riscv/riscv/machspl.h
new file mode 100644
index 0000000..62915a9
--- /dev/null
+++ b/riscv/riscv/machspl.h
@@ -0,0 +1,29 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/* XXX replaced by... */
+#include <riscv/spl.h>
+
diff --git a/riscv/riscv/model_dep.c b/riscv/riscv/model_dep.c
new file mode 100644
index 0000000..c0ba56f
--- /dev/null
+++ b/riscv/riscv/model_dep.c
@@ -0,0 +1,124 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: model_dep.c
+ * Author: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Basic initialization for I386 - ISA bus machines.
+ */
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <device/cons.h>
+
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#include <mach/machine.h>
+#include <mach/machine/multiboot.h>
+#include <mach/xen.h>
+
+#include <kern/assert.h>
+#include <kern/cpu_number.h>
+#include <kern/debug.h>
+#include <kern/mach_clock.h>
+#include <kern/macros.h>
+#include <kern/printf.h>
+#include <kern/startup.h>
+#include <kern/smp.h>
+#include <sys/types.h>
+#include <vm/vm_page.h>
+
+/*
+#include <riscv/fpu.h>
+#include <riscv/gdt.h>
+#include <riscv/ktss.h>
+#include <riscv/ldt.h>
+#include <riscv/machspl.h>
+#include <riscv/mp_desc.h>
+#include <riscv/pit.h>
+#include <riscv/pmap.h>
+#include <riscv/proc_reg.h>
+#include <riscv/vm_param.h>
+#include <riscv/locore.h>
+#include <riscv/model_dep.h>
+#include <riscv/smp.h>
+#include <riscv/seg.h>
+#include <riscv/acpi_parse_apic.h>
+#include <riscv/autoconf.h>
+#include <riscv/biosmem.h>
+#include <riscv/elf.h>
+#include <riscv/idt.h>
+#include <riscv/int_init.h>
+#include <riscv/kd.h>
+#include <riscv/rtc.h>
+#include <riscv/model_dep.h>
+#include <machine/irq.h>
+*/
+
+/* start test for risc-v */
+unsigned char *uart = (unsigned char *)0x10000000;
+
+void putchar_uart(char c) {
+ *uart = c;
+ return;
+}
+
+void print_uart(const char * str) {
+ while(*str != '\0') {
+ putchar_uart(*str);
+ ++str;
+ }
+ return;
+}
+
+void hello_riscv(void) {
+ print_uart("Hello world!\n");
+ while(1) {
+ // Read input from the UART
+ putchar_uart(*uart);
+ }
+ return;
+}
+/* end test for risc-v */
+
+/*
+ * C boot entrypoint - called by boot_entry in boothdr.S.
+ * Running in flat mode, but without paging yet.
+ */
+//void c_boot_entry(vm_offset_t b)
+void c_boot_entry()
+{
+ print_uart("Hello world!\n");
+ while(1) {
+ // Read input from the UART
+ putchar_uart(*uart);
+ }
+ return;
+
+}
diff --git a/riscv/riscv/model_dep.h b/riscv/riscv/model_dep.h
new file mode 100644
index 0000000..8ef3fce
--- /dev/null
+++ b/riscv/riscv/model_dep.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _MODEL_DEP_H_
+#define _MODEL_DEP_H_
+
+#include <riscv/vm_param.h>
+#include <mach/vm_prot.h>
+
+/*
+ * Interrupt stack.
+ */
+extern vm_offset_t int_stack_top[NCPUS], int_stack_base[NCPUS];
+
+/* Check whether P points to the per-cpu interrupt stack. */
+#define ON_INT_STACK(P, CPU) (((P) & ~(INTSTACK_SIZE-1)) == int_stack_base[CPU])
+
+extern vm_offset_t timemmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
+
+void inittodr(void);
+
+boolean_t init_alloc_aligned(vm_size_t size, vm_offset_t *addrp);
+
+#endif /* _MODEL_DEP_H_ */
diff --git a/riscv/riscv/mp_desc.h b/riscv/riscv/mp_desc.h
new file mode 100644
index 0000000..dc3a7dc
--- /dev/null
+++ b/riscv/riscv/mp_desc.h
@@ -0,0 +1,98 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_MP_DESC_H_
+#define _I386_MP_DESC_H_
+
+#include <mach/kern_return.h>
+
+#if MULTIPROCESSOR
+
+/*
+ * Multiprocessor i386/i486 systems use a separate copy of the
+ * GDT, IDT, LDT, and kernel TSS per processor. The first three
+ * are separate to avoid lock contention: the i386 uses locked
+ * memory cycles to access the descriptor tables. The TSS is
+ * separate since each processor needs its own kernel stack,
+ * and since using a TSS marks it busy.
+ */
+
+#include "seg.h"
+#include "tss.h"
+#include <i386at/idt.h>
+#include "gdt.h"
+#include "ldt.h"
+
+/*
+ * The descriptor tables are together in a structure
+ * allocated one per processor (except for the boot processor).
+ */
+struct mp_desc_table {
+ struct real_gate idt[IDTSZ]; /* IDT */
+ struct real_descriptor gdt[GDTSZ]; /* GDT */
+ struct real_descriptor ldt[LDTSZ]; /* LDT */
+ struct task_tss ktss;
+};
+
+/*
+ * They are pointed to by a per-processor array.
+ */
+extern struct mp_desc_table *mp_desc_table[NCPUS];
+
+/*
+ * The kernel TSS gets its own pointer.
+ */
+extern struct task_tss *mp_ktss[NCPUS];
+
+/*
+ * So does the GDT.
+ */
+extern struct real_descriptor *mp_gdt[NCPUS];
+
+extern uint8_t solid_intstack[];
+
+extern int bspdone;
+
+/*
+ * Each CPU calls this routine to set up its descriptor tables.
+ */
+extern int mp_desc_init(int);
+
+
+extern void interrupt_processor(int cpu);
+
+
+#endif /* MULTIPROCESSOR */
+
+extern void start_other_cpus(void);
+
+extern kern_return_t cpu_start(int cpu);
+
+extern kern_return_t cpu_control(int cpu, const int *info, unsigned int count);
+
+extern void interrupt_stack_alloc(void);
+
+#endif /* _I386_MP_DESC_H_ */
diff --git a/riscv/riscv/pcb.h b/riscv/riscv/pcb.h
new file mode 100644
index 0000000..4d48b9f
--- /dev/null
+++ b/riscv/riscv/pcb.h
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright (C) 2006 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Barry deFreese.
+ */
+/*
+ *
+ *
+ */
+
+#ifndef _I386_PCB_H_
+#define _I386_PCB_H_
+
+#include <sys/types.h>
+#include <mach/exec/exec.h>
+#include <mach/thread_status.h>
+#include <machine/thread.h>
+#include <machine/io_perm.h>
+
+extern void pcb_init (task_t parent_task, thread_t thread);
+
+extern void pcb_terminate (thread_t thread);
+
+extern void pcb_collect (thread_t thread);
+
+extern kern_return_t thread_setstatus (
+ thread_t thread,
+ int flavor,
+ thread_state_t tstate,
+ unsigned int count);
+
+extern kern_return_t thread_getstatus (
+ thread_t thread,
+ int flavor,
+ thread_state_t tstate,
+ unsigned int *count);
+
+extern void thread_set_syscall_return (
+ thread_t thread,
+ kern_return_t retval);
+
+extern vm_offset_t user_stack_low (vm_size_t stack_size);
+
+extern vm_offset_t set_user_regs (
+ vm_offset_t stack_base,
+ vm_offset_t stack_size,
+ const struct exec_info *exec_info,
+ vm_size_t arg_size);
+
+extern void load_context (thread_t new);
+
+extern void stack_attach (
+ thread_t thread,
+ vm_offset_t stack,
+ void (*continuation)(thread_t));
+
+extern vm_offset_t stack_detach (thread_t thread);
+
+extern void switch_ktss (pcb_t pcb);
+
+extern void update_ktss_iopb (unsigned char *new_iopb, io_port_t size);
+
+extern thread_t Load_context (thread_t new);
+
+extern thread_t Switch_context (thread_t old, continuation_t continuation, thread_t new);
+
+extern void switch_to_shutdown_context(thread_t thread,
+ void (*routine)(processor_t),
+ processor_t processor);
+
+extern void Thread_continue (void);
+
+extern void pcb_module_init (void);
+
+#endif /* _I386_PCB_H_ */
diff --git a/riscv/riscv/percpu.h b/riscv/riscv/percpu.h
new file mode 100644
index 0000000..86b0a31
--- /dev/null
+++ b/riscv/riscv/percpu.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2023 Free Software Foundation, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PERCPU_H_
+#define _PERCPU_H_
+
+struct percpu;
+
+#if NCPUS > 1
+
+#define percpu_assign(stm, val) \
+ asm("mov %[src], %%gs:%c[offs]" \
+ : /* No outputs */ \
+ : [src] "r" (val), [offs] "e" (__builtin_offsetof(struct percpu, stm)) \
+ : );
+
+#define percpu_get(typ, stm) \
+MACRO_BEGIN \
+ typ val_; \
+ \
+ asm("mov %%gs:%c[offs], %[dst]" \
+ : [dst] "=r" (val_) \
+ : [offs] "e" (__builtin_offsetof(struct percpu, stm)) \
+ : ); \
+ \
+ val_; \
+MACRO_END
+
+#define percpu_ptr(typ, stm) \
+MACRO_BEGIN \
+ typ *ptr_ = (typ *)__builtin_offsetof(struct percpu, stm); \
+ \
+ asm("add %%gs:0, %[pointer]" \
+ : [pointer] "+r" (ptr_) \
+ : /* No inputs */ \
+ : ); \
+ \
+ ptr_; \
+MACRO_END
+
+#else
+
+#define percpu_assign(stm, val) \
+MACRO_BEGIN \
+ percpu_array[0].stm = val; \
+MACRO_END
+#define percpu_get(typ, stm) \
+ (percpu_array[0].stm)
+#define percpu_ptr(typ, stm) \
+ (&percpu_array[0].stm)
+
+#endif
+
+#include <kern/processor.h>
+#include <kern/thread.h>
+
+struct percpu {
+ struct percpu *self;
+ int apic_id;
+ int cpu_id;
+ struct processor processor;
+ thread_t active_thread;
+ vm_offset_t active_stack;
+/*
+ struct machine_slot machine_slot;
+ struct mp_desc_table mp_desc_table;
+ vm_offset_t int_stack_top;
+ vm_offset_t int_stack_base;
+ ast_t need_ast;
+ ipc_kmsg_t ipc_kmsg_cache;
+ pmap_update_list cpu_update_list;
+ spl_t saved_ipl;
+ spl_t curr_ipl;
+ timer_data_t kernel_timer;
+ timer_t current_timer;
+ unsigned long in_interrupt;
+*/
+};
+
+extern struct percpu percpu_array[NCPUS];
+
+void init_percpu(int cpu);
+
+#endif /* _PERCPU_H_ */
diff --git a/riscv/riscv/pic.c b/riscv/riscv/pic.c
new file mode 100644
index 0000000..66fbc04
--- /dev/null
+++ b/riscv/riscv/pic.c
@@ -0,0 +1,262 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/*
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <sys/types.h>
+#include <kern/printf.h>
+#include <i386/ipl.h>
+#include <i386/pic.h>
+#include <i386/machspl.h>
+#include <i386/pio.h>
+
+spl_t curr_ipl[NCPUS] = {0};
+int curr_pic_mask;
+int spl_init = 0;
+
+int iunit[NINTR] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+unsigned short master_icw, master_ocw, slaves_icw, slaves_ocw;
+
+u_short PICM_ICW1, PICM_OCW1, PICS_ICW1, PICS_OCW1 ;
+u_short PICM_ICW2, PICM_OCW2, PICS_ICW2, PICS_OCW2 ;
+u_short PICM_ICW3, PICM_OCW3, PICS_ICW3, PICS_OCW3 ;
+u_short PICM_ICW4, PICS_ICW4 ;
+
+/*
+** picinit() - This routine
+** * Establishes a table of interrupt vectors
+** * Establishes location of PICs in the system
+** * Unmasks all interrupts in the PICs
+** * Initialises them
+**
+** At this stage the interrupt functionality of this system should be
+** complete.
+*/
+
+/*
+** Initialise the PICs , master first, then the slave.
+** All the register field definitions are described in pic.h also
+** the settings of these fields for the various registers are selected.
+*/
+
+void
+picinit(void)
+{
+
+ asm("cli");
+
+ /*
+ ** 0. Initialise the current level to match cli()
+ */
+ int i;
+
+ for (i = 0; i < NCPUS; i++)
+ curr_ipl[i] = SPLHI;
+ curr_pic_mask = 0;
+
+ /*
+ ** 1. Generate addresses to each PIC port.
+ */
+
+ master_icw = PIC_MASTER_ICW;
+ master_ocw = PIC_MASTER_OCW;
+ slaves_icw = PIC_SLAVE_ICW;
+ slaves_ocw = PIC_SLAVE_OCW;
+
+ /*
+ ** 2. Select options for each ICW and each OCW for each PIC.
+ */
+
+ PICM_ICW1 =
+ (ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8 | CASCADE_MODE | ICW4__NEEDED);
+
+ PICS_ICW1 =
+ (ICW_TEMPLATE | EDGE_TRIGGER | ADDR_INTRVL8 | CASCADE_MODE | ICW4__NEEDED);
+
+ PICM_ICW2 = PICM_VECTBASE;
+ PICS_ICW2 = PICS_VECTBASE;
+
+#ifdef AT386
+ PICM_ICW3 = ( SLAVE_ON_IR2 );
+ PICS_ICW3 = ( I_AM_SLAVE_2 );
+#endif /* AT386 */
+
+ PICM_ICW4 =
+ (SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD | I8086_EMM_MOD);
+ PICS_ICW4 =
+ (SNF_MODE_DIS | NONBUFD_MODE | NRML_EOI_MOD | I8086_EMM_MOD);
+
+ PICM_OCW1 = (curr_pic_mask & 0x00FF);
+ PICS_OCW1 = ((curr_pic_mask & 0xFF00)>>8);
+
+ PICM_OCW2 = NON_SPEC_EOI;
+ PICS_OCW2 = NON_SPEC_EOI;
+
+ PICM_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );
+ PICS_OCW3 = (OCW_TEMPLATE | READ_NEXT_RD | READ_IR_ONRD );
+
+ /*
+ ** 3. Initialise master - send commands to master PIC
+ */
+
+ outb ( master_icw, PICM_ICW1 );
+ outb ( master_ocw, PICM_ICW2 );
+ outb ( master_ocw, PICM_ICW3 );
+ outb ( master_ocw, PICM_ICW4 );
+
+ outb ( master_ocw, PICM_MASK );
+ outb ( master_icw, PICM_OCW3 );
+
+ /*
+ ** 4. Initialise slave - send commands to slave PIC
+ */
+
+ outb ( slaves_icw, PICS_ICW1 );
+ outb ( slaves_ocw, PICS_ICW2 );
+ outb ( slaves_ocw, PICS_ICW3 );
+ outb ( slaves_ocw, PICS_ICW4 );
+
+
+ outb ( slaves_ocw, PICS_OCW1 );
+ outb ( slaves_icw, PICS_OCW3 );
+
+ /*
+ ** 5. Initialise interrupts
+ */
+ outb ( master_ocw, PICM_OCW1 );
+
+}
+
+void
+intnull(int unit_dev)
+{
+ static char warned[NINTR];
+
+ if (unit_dev >= NINTR)
+ printf("Unknown interrupt %d\n", unit_dev);
+ else if (!warned[unit_dev])
+ {
+ printf("intnull(%d)\n", unit_dev);
+ warned[unit_dev] = 1;
+ }
+
+}
+
+/*
+ * Mask a PIC IRQ.
+ */
+void
+mask_irq (unsigned int irq_nr)
+{
+ int new_pic_mask = curr_pic_mask | 1 << irq_nr;
+
+ if (curr_pic_mask != new_pic_mask)
+ {
+ curr_pic_mask = new_pic_mask;
+ if (irq_nr < 8)
+ {
+ outb (PIC_MASTER_OCW, curr_pic_mask & 0xff);
+ }
+ else
+ {
+ outb (PIC_SLAVE_OCW, curr_pic_mask >> 8);
+ }
+ }
+}
+
+/*
+ * Unmask a PIC IRQ.
+ */
+void
+unmask_irq (unsigned int irq_nr)
+{
+ int mask;
+ int new_pic_mask;
+
+ mask = 1 << irq_nr;
+ if (irq_nr >= 8)
+ {
+ mask |= 1 << 2;
+ }
+
+ new_pic_mask = curr_pic_mask & ~mask;
+
+ if (curr_pic_mask != new_pic_mask)
+ {
+ curr_pic_mask = new_pic_mask;
+ if (irq_nr < 8)
+ {
+ outb (PIC_MASTER_OCW, curr_pic_mask & 0xff);
+ }
+ else
+ {
+ outb (PIC_SLAVE_OCW, curr_pic_mask >> 8);
+ }
+ }
+}
+
diff --git a/riscv/riscv/pic.h b/riscv/riscv/pic.h
new file mode 100644
index 0000000..aec0ef6
--- /dev/null
+++ b/riscv/riscv/pic.h
@@ -0,0 +1,191 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760
+All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this
+software and its documentation for any purpose and
+without fee is hereby granted, provided that the above
+copyright notice appears in all copies and that both the
+copyright notice and this permission notice appear in
+supporting documentation, and that the name of Prime
+Computer, Inc. not be used in advertising or publicity
+pertaining to distribution of the software without
+specific, written prior permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER,
+INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN
+NO EVENT SHALL PRIME COMPUTER, INC. BE LIABLE FOR ANY
+SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR
+OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
+WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _I386_PIC_H_
+#define _I386_PIC_H_
+
+#ifndef APIC
+#define NINTR 0x10
+#endif
+#define NPICS 0x02
+
+/*
+** The following are definitions used to locate the PICs in the system
+*/
+
+#if defined(AT386) || defined(ATX86_64)
+#define ADDR_PIC_BASE 0x20
+#define OFF_ICW 0x00
+#define OFF_OCW 0x01
+#define SIZE_PIC 0x80
+#endif /* defined(AT386) */
+
+#define PIC_MASTER_ICW (ADDR_PIC_BASE + OFF_ICW)
+#define PIC_MASTER_OCW (ADDR_PIC_BASE + OFF_OCW)
+#define PIC_SLAVE_ICW (PIC_MASTER_ICW + SIZE_PIC)
+#define PIC_SLAVE_OCW (PIC_MASTER_OCW + SIZE_PIC)
+
+/*
+** The following banks of definitions ICW1, ICW2, ICW3, and ICW4 are used
+** to define the fields of the various ICWs for initialisation of the PICs
+*/
+
+/*
+** ICW1
+*/
+
+#define ICW_TEMPLATE 0x10
+
+#define LEVL_TRIGGER 0x08
+#define EDGE_TRIGGER 0x00
+#define ADDR_INTRVL4 0x04
+#define ADDR_INTRVL8 0x00
+#define SINGLE__MODE 0x02
+#define CASCADE_MODE 0x00
+#define ICW4__NEEDED 0x01
+#define NO_ICW4_NEED 0x00
+
+/*
+** ICW2
+*/
+
+#if defined(AT386) || defined(ATX86_64)
+#define PICM_VECTBASE 0x20
+#define PICS_VECTBASE PICM_VECTBASE + 0x08
+#endif /* defined(AT386) */
+
+/*
+** ICW3
+*/
+
+#define SLAVE_ON_IR0 0x01
+#define SLAVE_ON_IR1 0x02
+#define SLAVE_ON_IR2 0x04
+#define SLAVE_ON_IR3 0x08
+#define SLAVE_ON_IR4 0x10
+#define SLAVE_ON_IR5 0x20
+#define SLAVE_ON_IR6 0x40
+#define SLAVE_ON_IR7 0x80
+
+#define I_AM_SLAVE_0 0x00
+#define I_AM_SLAVE_1 0x01
+#define I_AM_SLAVE_2 0x02
+#define I_AM_SLAVE_3 0x03
+#define I_AM_SLAVE_4 0x04
+#define I_AM_SLAVE_5 0x05
+#define I_AM_SLAVE_6 0x06
+#define I_AM_SLAVE_7 0x07
+
+/*
+** ICW4
+*/
+
+#define SNF_MODE_ENA 0x10
+#define SNF_MODE_DIS 0x00
+#define BUFFERD_MODE 0x08
+#define NONBUFD_MODE 0x00
+#define AUTO_EOI_MOD 0x02
+#define NRML_EOI_MOD 0x00
+#define I8086_EMM_MOD 0x01
+#define SET_MCS_MODE 0x00
+
+/*
+** OCW1
+*/
+#define PICM_MASK 0xFF
+#define PICS_MASK 0xFF
+/*
+** OCW2
+*/
+
+#define NON_SPEC_EOI 0x20
+#define SPECIFIC_EOI 0x60
+#define ROT_NON_SPEC 0xA0
+#define SET_ROT_AEOI 0x80
+#define RSET_ROTAEOI 0x00
+#define ROT_SPEC_EOI 0xE0
+#define SET_PRIORITY 0xC0
+#define NO_OPERATION 0x40
+
+#define SEND_EOI_IR0 0x00
+#define SEND_EOI_IR1 0x01
+#define SEND_EOI_IR2 0x02
+#define SEND_EOI_IR3 0x03
+#define SEND_EOI_IR4 0x04
+#define SEND_EOI_IR5 0x05
+#define SEND_EOI_IR6 0x06
+#define SEND_EOI_IR7 0x07
+
+/*
+** OCW3
+*/
+
+#define OCW_TEMPLATE 0x08
+#define SPECIAL_MASK 0x40
+#define MASK_MDE_SET 0x20
+#define MASK_MDE_RST 0x00
+#define POLL_COMMAND 0x04
+#define NO_POLL_CMND 0x00
+#define READ_NEXT_RD 0x02
+#define READ_IR_ONRD 0x00
+#define READ_IS_ONRD 0x01
+
+#define PIC_MASK_ZERO 0x00
+
+#if !defined(__ASSEMBLER__) && !defined(APIC)
+extern void picinit (void);
+extern int curr_pic_mask;
+extern void intnull(int unit);
+extern void mask_irq (unsigned int irq_nr);
+extern void unmask_irq (unsigned int irq_nr);
+#endif /* __ASSEMBLER__ */
+
+#endif /* _I386_PIC_H_ */
diff --git a/riscv/riscv/pmap.h b/riscv/riscv/pmap.h
new file mode 100644
index 0000000..19de7c4
--- /dev/null
+++ b/riscv/riscv/pmap.h
@@ -0,0 +1,28 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+// TODO
+#include <intel/pmap.h>
diff --git a/riscv/riscv/proc_reg.h b/riscv/riscv/proc_reg.h
new file mode 100644
index 0000000..6892507
--- /dev/null
+++ b/riscv/riscv/proc_reg.h
@@ -0,0 +1,402 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Processor registers for i386 and i486.
+ */
+#ifndef _I386_PROC_REG_H_
+#define _I386_PROC_REG_H_
+
+/*
+ * CR0
+ */
+#define CR0_PG 0x80000000 /* enable paging */
+#define CR0_CD 0x40000000 /* i486: cache disable */
+#define CR0_NW 0x20000000 /* i486: no write-through */
+#define CR0_AM 0x00040000 /* i486: alignment check mask */
+#define CR0_WP 0x00010000 /* i486: write-protect kernel access */
+#define CR0_NE 0x00000020 /* i486: handle numeric exceptions */
+#define CR0_ET 0x00000010 /* extension type is 80387 */
+ /* (not official) */
+#define CR0_TS 0x00000008 /* task switch */
+#define CR0_EM 0x00000004 /* emulate coprocessor */
+#define CR0_MP 0x00000002 /* monitor coprocessor */
+#define CR0_PE 0x00000001 /* enable protected mode */
+
+/*
+ * CR3
+ */
+#define CR3_PCD 0x0010 /* Page-level Cache Disable */
+#define CR3_PWT 0x0008 /* Page-level Writes Transparent */
+
+/*
+ * CR4
+ */
+#define CR4_VME 0x0001 /* Virtual-8086 Mode Extensions */
+#define CR4_PVI 0x0002 /* Protected-Mode Virtual Interrupts */
+#define CR4_TSD 0x0004 /* Time Stamp Disable */
+#define CR4_DE 0x0008 /* Debugging Extensions */
+#define CR4_PSE 0x0010 /* Page Size Extensions */
+#define CR4_PAE 0x0020 /* Physical Address Extension */
+#define CR4_MCE 0x0040 /* Machine-Check Enable */
+#define CR4_PGE 0x0080 /* Page Global Enable */
+#define CR4_PCE 0x0100 /* Performance-Monitoring Counter
+ * Enable */
+#define CR4_OSFXSR 0x0200 /* Operating System Support for FXSAVE
+ * and FXRSTOR instructions */
+#define CR4_OSXMMEXCPT 0x0400 /* Operating System Support for Unmasked
+ * SIMD Floating-Point Exceptions */
+#define CR4_OSXSAVE 0x40000 /* Operating System Support for XSAVE
+ * and XRSTOR instructions */
+
+#ifndef __ASSEMBLER__
+#ifdef __GNUC__
+
+static inline unsigned long
+get_eflags(void)
+{
+ unsigned long eflags;
+#ifdef __x86_64__
+ asm("pushfq; popq %0" : "=r" (eflags));
+#else
+ asm("pushfl; popl %0" : "=r" (eflags));
+#endif
+ return eflags;
+}
+
+static inline void
+set_eflags(unsigned long eflags)
+{
+#ifdef __x86_64__
+ asm volatile("pushq %0; popfq" : : "r" (eflags));
+#else
+ asm volatile("pushl %0; popfl" : : "r" (eflags));
+#endif
+}
+
+#define get_esp() \
+ ({ \
+ register unsigned long _temp__ asm("esp"); \
+ _temp__; \
+ })
+
+#ifdef __x86_64__
+#define get_eflags() \
+ ({ \
+ register unsigned long _temp__; \
+ asm("pushfq; popq %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#else
+#define get_eflags() \
+ ({ \
+ register unsigned long _temp__; \
+ asm("pushfl; popl %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#define get_cr0() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr0, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr0(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0, %%cr0" : : "r" (_temp__)); \
+ })
+
+#define get_cr2() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr2, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#ifdef MACH_PV_PAGETABLES
+extern unsigned long cr3;
+#define get_cr3() (cr3)
+#define set_cr3(value) \
+ ({ \
+ cr3 = (value); \
+ if (!hyp_set_cr3(value)) \
+ panic("set_cr3"); \
+ })
+#else /* MACH_PV_PAGETABLES */
+#define get_cr3() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr3, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr3(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0, %%cr3" : : "r" (_temp__) : "memory"); \
+ })
+#endif /* MACH_PV_PAGETABLES */
+
+#define flush_tlb() set_cr3(get_cr3())
+
+#ifndef MACH_PV_PAGETABLES
+#define invlpg(addr) \
+ ({ \
+ asm volatile("invlpg (%0)" : : "r" (addr)); \
+ })
+
+#define invlpg_linear(start) \
+ ({ \
+ asm volatile( \
+ "movw %w1,%%es\n" \
+ "\tinvlpg %%es:(%0)\n" \
+ "\tmovw %w2,%%es" \
+ :: "r" (start), "q" (LINEAR_DS), "q" (KERNEL_DS)); \
+ })
+
+#define invlpg_linear_range(start, end) \
+ ({ \
+ register unsigned long var = trunc_page(start); \
+ asm volatile( \
+ "movw %w2,%%es\n" \
+ "1:\tinvlpg %%es:(%0)\n" \
+ "\taddl %c4,%0\n" \
+ "\tcmpl %0,%1\n" \
+ "\tjb 1b\n" \
+ "\tmovw %w3,%%es" \
+ : "+r" (var) : "r" (end), \
+ "q" (LINEAR_DS), "q" (KERNEL_DS), "i" (PAGE_SIZE)); \
+ })
+#endif /* MACH_PV_PAGETABLES */
+
+#define get_cr4() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%cr4, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+
+#define set_cr4(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0, %%cr4" : : "r" (_temp__)); \
+ })
+
+
+#ifdef MACH_RING1
+#define set_ts() \
+ hyp_fpu_taskswitch(1)
+#define clear_ts() \
+ hyp_fpu_taskswitch(0)
+#else /* MACH_RING1 */
+#define set_ts() \
+ set_cr0(get_cr0() | CR0_TS)
+
+#define clear_ts() \
+ asm volatile("clts")
+#endif /* MACH_RING1 */
+
+#define get_tr() \
+ ({ \
+ unsigned short _seg__; \
+ asm volatile("str %0" : "=rm" (_seg__) ); \
+ _seg__; \
+ })
+
+#define set_tr(seg) \
+ asm volatile("ltr %0" : : "rm" ((unsigned short)(seg)) )
+
+#define get_ldt() \
+ ({ \
+ unsigned short _seg__; \
+ asm volatile("sldt %0" : "=rm" (_seg__) ); \
+ _seg__; \
+ })
+
+#define set_ldt(seg) \
+ asm volatile("lldt %0" : : "rm" ((unsigned short)(seg)) )
+
+/* This doesn't set a processor register,
+ but it's often used immediately after setting one,
+ to flush the instruction queue. */
+#define flush_instr_queue() \
+ asm("jmp 0f\n" \
+ "0:\n")
+
+#ifdef MACH_RING1
+#define get_dr0() hyp_get_debugreg(0)
+#else
+#define get_dr0() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr0, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr0(value) hyp_set_debugreg(0, value)
+#else
+#define set_dr0(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr0" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr1() hyp_get_debugreg(1)
+#else
+#define get_dr1() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr1, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr1(value) hyp_set_debugreg(1, value)
+#else
+#define set_dr1(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr1" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr2() hyp_get_debugreg(2)
+#else
+#define get_dr2() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr2, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr2(value) hyp_set_debugreg(2, value)
+#else
+#define set_dr2(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr2" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr3() hyp_get_debugreg(3)
+#else
+#define get_dr3() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr3, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr3(value) hyp_set_debugreg(3, value)
+#else
+#define set_dr3(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr3" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr6() hyp_get_debugreg(6)
+#else
+#define get_dr6() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr6, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr6(value) hyp_set_debugreg(6, value)
+#else
+#define set_dr6(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr6" : : "r" (_temp__)); \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define get_dr7() hyp_get_debugreg(7)
+#else
+#define get_dr7() \
+ ({ \
+ register unsigned long _temp__; \
+ asm volatile("mov %%dr7, %0" : "=r" (_temp__)); \
+ _temp__; \
+ })
+#endif
+
+#ifdef MACH_RING1
+#define set_dr7(value) hyp_set_debugreg(7, value)
+#else
+#define set_dr7(value) \
+ ({ \
+ register unsigned long _temp__ = (value); \
+ asm volatile("mov %0,%%dr7" : : "r" (_temp__)); \
+ })
+#endif
+
+/* Note: gcc might want to use bx or the stack for %1 addressing, so we can't
+ * use them :/ */
+#ifdef __x86_64__
+#define cpuid(eax, ebx, ecx, edx) \
+{ \
+ uint64_t sav_rbx; \
+ asm( "mov %%rbx,%2\n\t" \
+ "cpuid\n\t" \
+ "xchg %2,%%rbx\n\t" \
+ "movl %k2,%1\n\t" \
+ : "+a" (eax), "=m" (ebx), "=&r" (sav_rbx), "+c" (ecx), "=&d" (edx)); \
+}
+#else
+#define cpuid(eax, ebx, ecx, edx) \
+{ \
+ asm ( "mov %%ebx,%1\n\t" \
+ "cpuid\n\t" \
+ "xchg %%ebx,%1\n\t" \
+ : "+a" (eax), "=&SD" (ebx), "+c" (ecx), "=&d" (edx)); \
+}
+#endif
+
+#endif /* __GNUC__ */
+#endif /* __ASSEMBLER__ */
+
+#endif /* _I386_PROC_REG_H_ */
diff --git a/riscv/riscv/riscvasm.sym b/riscv/riscv/riscvasm.sym
new file mode 100644
index 0000000..4e44083
--- /dev/null
+++ b/riscv/riscv/riscvasm.sym
@@ -0,0 +1,38 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * Copyright (c) 1991 IBM Corporation
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation,
+ * and that the name IBM not be used in advertising or publicity
+ * pertaining to distribution of the software without specific, written
+ * prior permission.
+ *
+ * CARNEGIE MELLON AND IBM ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON AND IBM DISCLAIM ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * Pass field offsets to assembly code.
+ */
+
+void dummy() {
+
+return;
+}
diff --git a/riscv/riscv/setup.c b/riscv/riscv/setup.c
new file mode 100644
index 0000000..db1d49e
--- /dev/null
+++ b/riscv/riscv/setup.c
@@ -0,0 +1,3 @@
+
+/* TODO: pending implementation */
+unsigned long boot_cpu_hartid;
diff --git a/riscv/riscv/smp.c b/riscv/riscv/smp.c
new file mode 100644
index 0000000..36413ef
--- /dev/null
+++ b/riscv/riscv/smp.c
@@ -0,0 +1,199 @@
+/* smp.h - riscv SMP controller for Mach
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Written by Almudena Garcia Jurado-Centurion
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#include <string.h>
+#include <riscv/apic.h>
+#include <riscv/smp.h>
+#include <riscv/cpu.h>
+#include <riscv/pio.h>
+#include <riscv/vm_param.h>
+#include <riscvat/idt.h>
+#include <riscvat/cram.h>
+#include <riscvat/acpi_parse_apic.h>
+#include <kern/printf.h>
+#include <mach/machine.h>
+
+#include <kern/smp.h>
+
+/*
+ * smp_data_init: initialize smp_data structure
+ * Must be called after smp_init(), once all APIC structures
+ * has been initialized
+ */
+static void smp_data_init(void)
+{
+ uint8_t numcpus = apic_get_numcpus();
+ smp_set_numcpus(numcpus);
+
+ for(int i = 0; i < numcpus; i++){
+ machine_slot[i].is_cpu = TRUE;
+ }
+
+}
+
+static void smp_send_ipi(unsigned apic_id, unsigned vector)
+{
+ unsigned long flags;
+
+ cpu_intr_save(&flags);
+
+ apic_send_ipi(NO_SHORTHAND, FIXED, PHYSICAL, ASSERT, EDGE, vector, apic_id);
+
+ do {
+ cpu_pause();
+ } while(lapic->icr_low.delivery_status == SEND_PENDING);
+
+ apic_send_ipi(NO_SHORTHAND, FIXED, PHYSICAL, DE_ASSERT, EDGE, vector, apic_id);
+
+ do {
+ cpu_pause();
+ } while(lapic->icr_low.delivery_status == SEND_PENDING);
+
+ cpu_intr_restore(flags);
+}
+
+void smp_remote_ast(unsigned apic_id)
+{
+ smp_send_ipi(apic_id, CALL_AST_CHECK);
+}
+
+void smp_pmap_update(unsigned apic_id)
+{
+ smp_send_ipi(apic_id, CALL_PMAP_UPDATE);
+}
+
+static void
+wait_for_ipi(void)
+{
+ /* This could have a timeout, but if the IPI
+ * is never delivered, its a disaster anyway */
+ while (lapic->icr_low.delivery_status == SEND_PENDING) {
+ cpu_pause();
+ }
+}
+
+static int
+smp_send_ipi_init(int apic_id)
+{
+ int err;
+
+ lapic->error_status.r = 0;
+
+ /* Assert INIT IPI:
+ *
+ * This is EDGE triggered to match the deassert
+ */
+ apic_send_ipi(NO_SHORTHAND, INIT, PHYSICAL, ASSERT, EDGE, 0, apic_id);
+
+ /* Wait for delivery */
+ wait_for_ipi();
+ hpet_mdelay(10);
+
+ /* Deassert INIT IPI:
+ *
+ * NB: This must be an EDGE triggered deassert signal.
+ * A LEVEL triggered deassert is only supported on very old hardware
+ * that does not support STARTUP IPIs at all, and instead jump
+ * via a warm reset vector.
+ */
+ apic_send_ipi(NO_SHORTHAND, INIT, PHYSICAL, DE_ASSERT, EDGE, 0, apic_id);
+
+ /* Wait for delivery */
+ wait_for_ipi();
+
+ err = lapic->error_status.r;
+ if (err) {
+ printf("ESR error upon INIT 0x%x\n", err);
+ }
+ return 0;
+}
+
+static int
+smp_send_ipi_startup(int apic_id, int vector)
+{
+ int err;
+
+ lapic->error_status.r = 0;
+
+ /* StartUp IPI:
+ *
+ * Have not seen any documentation for trigger mode for this IPI
+ * but it seems to work with EDGE. (AMD BKDG FAM16h document specifies dont care)
+ */
+ apic_send_ipi(NO_SHORTHAND, STARTUP, PHYSICAL, ASSERT, EDGE, vector, apic_id);
+
+ /* Wait for delivery */
+ wait_for_ipi();
+
+ err = lapic->error_status.r;
+ if (err) {
+ printf("ESR error upon STARTUP 0x%x\n", err);
+ }
+ return 0;
+}
+
+/* See Intel IA32/64 Software Developer's Manual 3A Section 8.4.4.1 */
+int smp_startup_cpu(unsigned apic_id, phys_addr_t start_eip)
+{
+#if 0
+ /* This block goes with a legacy method of INIT that only works with
+ * old hardware that does not support SIPIs.
+ * Must use INIT DEASSERT LEVEL triggered IPI to use this block.
+ * (At least one AMD FCH does not support this IPI mode,
+ * See AMD BKDG FAM16h document # 48751 page 461).
+ */
+
+ /* Tell CMOS to warm reset through through 40:67 */
+ outb(CMOS_ADDR, CMOS_SHUTDOWN);
+ outb(CMOS_DATA, CM_JMP_467);
+
+ /* Set warm reset vector to point to AP startup code */
+ uint16_t dword[2];
+ dword[0] = 0;
+ dword[1] = start_eip >> 4;
+ memcpy((uint8_t *)phystokv(0x467), dword, 4);
+#endif
+
+ /* Local cache flush */
+ asm("wbinvd":::"memory");
+
+ printf("Sending IPIs to APIC ID %u...\n", apic_id);
+
+ smp_send_ipi_init(apic_id);
+ hpet_mdelay(10);
+ smp_send_ipi_startup(apic_id, start_eip >> STARTUP_VECTOR_SHIFT);
+ hpet_udelay(200);
+ smp_send_ipi_startup(apic_id, start_eip >> STARTUP_VECTOR_SHIFT);
+ hpet_udelay(200);
+
+ printf("done\n");
+ return 0;
+}
+
+/*
+ * smp_init: initialize the SMP support, starting the cpus searching
+ * and enumeration.
+ */
+int smp_init(void)
+{
+ smp_data_init();
+
+ return 0;
+}
diff --git a/riscv/riscv/smp.h b/riscv/riscv/smp.h
new file mode 100644
index 0000000..73d273e
--- /dev/null
+++ b/riscv/riscv/smp.h
@@ -0,0 +1,34 @@
+/* smp.h - i386 SMP controller for Mach. Header file
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Written by Almudena Garcia Jurado-Centurion
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+#ifndef _SMP_H_
+#define _SMP_H_
+
+#include <mach/machine/vm_types.h>
+
+int smp_init(void);
+void smp_remote_ast(unsigned apic_id);
+void smp_pmap_update(unsigned apic_id);
+int smp_startup_cpu(unsigned apic_id, phys_addr_t start_eip);
+
+#define cpu_pause() asm volatile ("pause" : : : "memory")
+#define STARTUP_VECTOR_SHIFT (20 - 8)
+
+#endif
diff --git a/riscv/riscv/spl.h b/riscv/riscv/spl.h
new file mode 100644
index 0000000..8e6454d
--- /dev/null
+++ b/riscv/riscv/spl.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+
+#ifndef _MACHINE_SPL_H_
+#define _MACHINE_SPL_H_
+
+/*
+ * This file defines the interrupt priority levels used by
+ * machine-dependent code.
+ */
+
+typedef int spl_t;
+
+extern spl_t (splhi)(void);
+
+extern spl_t (spl0)(void);
+
+extern spl_t (spl1)(void);
+extern spl_t (splsoftclock)(void);
+
+extern spl_t (spl2)(void);
+
+extern spl_t (spl3)(void);
+
+extern spl_t (spl4)(void);
+extern spl_t (splnet)(void);
+extern spl_t (splhdw)(void);
+
+extern spl_t (spl5)(void);
+extern spl_t (splbio)(void);
+extern spl_t (spldcm)(void);
+
+extern spl_t (spl6)(void);
+extern spl_t (spltty)(void);
+extern spl_t (splimp)(void);
+extern spl_t (splvm)(void);
+
+extern spl_t (spl7)(void);
+extern spl_t (splclock)(void);
+extern spl_t (splsched)(void);
+extern spl_t (splhigh)(void);
+
+extern spl_t (splx)(spl_t n);
+extern spl_t (splx_cli)(spl_t n);
+
+extern void splon (unsigned long n);
+
+extern unsigned long sploff (void);
+
+extern void setsoftclock (void);
+extern int spl_init;
+
+/* XXX Include each other... */
+#include <riscv/ipl.h>
+
+#endif /* _MACHINE_SPL_H_ */
diff --git a/riscv/riscv/task.h b/riscv/riscv/task.h
new file mode 100644
index 0000000..0060ad4
--- /dev/null
+++ b/riscv/riscv/task.h
@@ -0,0 +1,61 @@
+/* Data types for machine specific parts of tasks on i386.
+
+ Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+
+ Written by Marcus Brinkmann. Glued into GNU Mach by Thomas Schwinge.
+
+ This file is part of GNU Mach.
+
+ GNU Mach is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any later
+ version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef _I386_TASK_H_
+#define _I386_TASK_H_
+
+#include <kern/kern_types.h>
+#include <kern/slab.h>
+
+/* The machine specific data of a task. */
+struct machine_task
+{
+ /* A lock protecting iopb_size and iopb. */
+ decl_simple_lock_data (, iopb_lock);
+
+ /* The highest I/O port number enabled. */
+ int iopb_size;
+
+ /* The I/O permission bitmap. */
+ unsigned char *iopb;
+};
+typedef struct machine_task machine_task_t;
+
+
+extern struct kmem_cache machine_task_iopb_cache;
+
+/* Initialize the machine task module. The function is called once at
+ start up by task_init in kern/task.c. */
+void machine_task_module_init (void);
+
+/* Initialize the machine specific part of task TASK. */
+void machine_task_init (task_t);
+
+/* Destroy the machine specific part of task TASK and release all
+ associated resources. */
+void machine_task_terminate (task_t);
+
+/* Try to release as much memory from the machine specific data in
+ task TASK. */
+void machine_task_collect (task_t);
+
+#endif /* _I386_TASK_H_ */
diff --git a/riscv/riscv/thread.h b/riscv/riscv/thread.h
new file mode 100644
index 0000000..51d4b51
--- /dev/null
+++ b/riscv/riscv/thread.h
@@ -0,0 +1,278 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: machine/thread.h
+ *
+ * This file contains the structure definitions for the thread
+ * state as applied to RISCV processors.
+ */
+
+#ifndef _RISCV_THREAD_H_
+#define _RISCV_THREAD_H_
+
+#include <mach/boolean.h>
+#include <mach/machine/vm_types.h>
+#include <mach/machine/fp_reg.h>
+#include <mach/machine/thread_status.h>
+
+#include <kern/lock.h>
+
+// TODO
+//#define USER_GDT_SLOTS 1
+//#include "gdt.h"
+
+/*
+ * i386_saved_state:
+ *
+ * This structure corresponds to the state of user registers
+ * as saved upon kernel entry. It lives in the pcb.
+ * It is also pushed onto the stack for exceptions in the kernel.
+ */
+
+struct i386_saved_state {
+#if !defined(__x86_64__) || defined(USER32)
+ unsigned long gs;
+ unsigned long fs;
+ unsigned long es;
+ unsigned long ds;
+#endif
+#ifdef __x86_64__
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+#endif
+ unsigned long edi;
+ unsigned long esi;
+ unsigned long ebp;
+ unsigned long cr2; /* kernel esp stored by pusha -
+ we save cr2 here later */
+ unsigned long ebx;
+ unsigned long edx;
+ unsigned long ecx;
+ unsigned long eax;
+ unsigned long trapno;
+ unsigned long err;
+ unsigned long eip;
+ unsigned long cs;
+ unsigned long efl;
+ unsigned long uesp;
+ unsigned long ss;
+#if !defined(__x86_64__) || defined(USER32)
+ struct v86_segs {
+ unsigned long v86_es; /* virtual 8086 segment registers */
+ unsigned long v86_ds;
+ unsigned long v86_fs;
+ unsigned long v86_gs;
+ } v86_segs;
+#endif
+};
+
+/*
+ * i386_exception_link:
+ *
+ * This structure lives at the high end of the kernel stack.
+ * It points to the current thread`s user registers.
+ */
+struct i386_exception_link {
+ struct i386_saved_state *saved_state;
+};
+
+/*
+ * i386_kernel_state:
+ *
+ * This structure corresponds to the state of kernel registers
+ * as saved in a context-switch. It lives at the base of the stack.
+ */
+
+struct i386_kernel_state {
+ long k_ebx; /* kernel context */
+ long k_esp;
+ long k_ebp;
+#ifdef __i386__
+ long k_edi;
+ long k_esi;
+#endif
+ long k_eip;
+#ifdef __x86_64__
+ long k_r12;
+ long k_r13;
+ long k_r14;
+ long k_r15;
+#endif
+};
+
+/*
+ * Save area for user floating-point state.
+ * Allocated only when necessary.
+ */
+
+struct i386_fpsave_state {
+ boolean_t fp_valid;
+
+ union {
+ struct {
+ struct i386_fp_save fp_save_state;
+ struct i386_fp_regs fp_regs;
+ };
+ struct i386_xfp_save xfp_save_state;
+ };
+};
+
+#if !defined(__x86_64__) || defined(USER32)
+/*
+ * v86_assist_state:
+ *
+ * This structure provides data to simulate 8086 mode
+ * interrupts. It lives in the pcb.
+ */
+
+struct v86_assist_state {
+ vm_offset_t int_table;
+ unsigned short int_count;
+ unsigned short flags; /* 8086 flag bits */
+};
+#define V86_IF_PENDING 0x8000 /* unused bit */
+#endif
+
+#if defined(__x86_64__) && !defined(USER32)
+struct i386_segment_base_state {
+ unsigned long fsbase;
+ unsigned long gsbase;
+};
+#endif
+
+/*
+ * i386_interrupt_state:
+ *
+ * This structure describes the set of registers that must
+ * be pushed on the current ring-0 stack by an interrupt before
+ * we can switch to the interrupt stack.
+ */
+
+struct i386_interrupt_state {
+#if !defined(__x86_64__) || defined(USER32)
+ long gs;
+ long fs;
+ long es;
+ long ds;
+#endif
+#ifdef __x86_64__
+ long r11;
+ long r10;
+ long r9;
+ long r8;
+ long rdi;
+ long rsi;
+#endif
+ long edx;
+ long ecx;
+ long eax;
+ long eip;
+ long cs;
+ long efl;
+};
+
+/*
+ * i386_machine_state:
+ *
+ * This structure corresponds to special machine state.
+ * It lives in the pcb. It is not saved by default.
+ */
+
+struct i386_machine_state {
+ struct user_ldt * ldt;
+ struct i386_fpsave_state *ifps;
+#if !defined(__x86_64__) || defined(USER32)
+ struct v86_assist_state v86s;
+#endif
+ //struct real_descriptor user_gdt[10];
+ struct i386_debug_state ids;
+#if defined(__x86_64__) && !defined(USER32)
+ struct i386_segment_base_state sbs;
+#endif
+};
+
+typedef struct pcb {
+ /* START of the exception stack.
+ * NOTE: this area is used as exception stack when switching
+ * CPL, and it MUST be big enough to save the thread state and
+ * switch to a proper stack area, even considering recursive
+ * exceptions, otherwise it could corrupt nearby memory */
+ struct i386_interrupt_state iis[2]; /* interrupt and NMI */
+#ifdef __x86_64__
+ unsigned long pad; /* ensure exception stack is aligned to 16 */
+#endif
+ struct i386_saved_state iss;
+ /* END of exception stack*/
+ struct i386_machine_state ims;
+ decl_simple_lock_data(, lock)
+ unsigned short init_control; /* Initial FPU control to set */
+#ifdef LINUX_DEV
+ void *data;
+#endif /* LINUX_DEV */
+} *pcb_t;
+
+/*
+ * On the kernel stack is:
+ * stack: ...
+ * struct i386_exception_link
+ * struct i386_kernel_state
+ * stack+KERNEL_STACK_SIZE
+ */
+
+#define STACK_IKS(stack) \
+ ((struct i386_kernel_state *)((stack) + KERNEL_STACK_SIZE) - 1)
+#define STACK_IEL(stack) \
+ ((struct i386_exception_link *)STACK_IKS(stack) - 1)
+
+#ifdef __x86_64__
+#define KERNEL_STACK_ALIGN 16
+#else
+#define KERNEL_STACK_ALIGN 4
+#endif
+
+#if defined(__x86_64__) && !defined(USER32)
+/* Follow System V AMD64 ABI guidelines. */
+#define USER_STACK_ALIGN 16
+#else
+#define USER_STACK_ALIGN 4
+#endif
+
+#define USER_REGS(thread) (&(thread)->pcb->iss)
+
+
+#define syscall_emulation_sync(task) /* do nothing */
+
+
+/* #include_next "thread.h" */
+
+
+#endif /* _RISCV_THREAD_H_ */
diff --git a/riscv/riscv/trap.c b/riscv/riscv/trap.c
new file mode 100644
index 0000000..db4c702
--- /dev/null
+++ b/riscv/riscv/trap.c
@@ -0,0 +1,675 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Hardware trap/fault handler.
+ */
+
+#include <sys/types.h>
+#include <string.h>
+
+#include <mach/machine/eflags.h>
+#include <i386/trap.h>
+#include <i386/fpu.h>
+#include <i386/locore.h>
+#include <i386/model_dep.h>
+#include <intel/read_fault.h>
+#include <machine/machspl.h> /* for spl_t */
+#include <machine/db_interface.h>
+
+#include <mach/exception.h>
+#include <mach/kern_return.h>
+#include "vm_param.h"
+#include <mach/machine/thread_status.h>
+
+#include <vm/vm_fault.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+
+#include <kern/ast.h>
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <kern/task.h>
+#include <kern/sched.h>
+#include <kern/sched_prim.h>
+#include <kern/exception.h>
+
+#if MACH_KDB
+#include <ddb/db_break.h>
+#include <ddb/db_run.h>
+#include <ddb/db_watch.h>
+#endif
+
+#include "debug.h"
+
+#if MACH_KDB
+boolean_t debug_all_traps_with_kdb = FALSE;
+extern struct db_watchpoint *db_watchpoint_list;
+extern boolean_t db_watchpoints_inserted;
+
+void
+thread_kdb_return(void)
+{
+ thread_t thread = current_thread();
+ struct i386_saved_state *regs = USER_REGS(thread);
+
+ if (kdb_trap(regs->trapno, regs->err, regs)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+}
+#endif /* MACH_KDB */
+
+#if MACH_TTD
+extern boolean_t kttd_enabled;
+boolean_t debug_all_traps_with_kttd = TRUE;
+#endif /* MACH_TTD */
+
+static void
+user_page_fault_continue(kern_return_t kr)
+{
+ thread_t thread = current_thread();
+ struct i386_saved_state *regs = USER_REGS(thread);
+
+ if (kr == KERN_SUCCESS) {
+#if MACH_KDB
+ if (db_watchpoint_list &&
+ db_watchpoints_inserted &&
+ (regs->err & T_PF_WRITE) &&
+ db_find_watchpoint(thread->task->map,
+ (vm_offset_t)regs->cr2,
+ regs))
+ kdb_trap(T_WATCHPOINT, 0, regs);
+#endif /* MACH_KDB */
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+
+#if MACH_KDB
+ if (debug_all_traps_with_kdb &&
+ kdb_trap(regs->trapno, regs->err, regs)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+#endif /* MACH_KDB */
+
+ i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
+ /*NOTREACHED*/
+}
+
+
+static char *trap_type[] = {
+ "Divide error",
+ "Debug trap",
+ "NMI",
+ "Breakpoint",
+ "Overflow",
+ "Bounds check",
+ "Invalid opcode",
+ "No coprocessor",
+ "Double fault",
+ "Coprocessor overrun",
+ "Invalid TSS",
+ "Segment not present",
+ "Stack bounds",
+ "General protection",
+ "Page fault",
+ "(reserved)",
+ "Coprocessor error"
+};
+#define TRAP_TYPES (sizeof(trap_type)/sizeof(trap_type[0]))
+
+char *trap_name(unsigned int trapnum)
+{
+ return trapnum < TRAP_TYPES ? trap_type[trapnum] : "(unknown)";
+}
+
+/*
+ * Trap from kernel mode. Only page-fault errors are recoverable,
+ * and then only in special circumstances. All other errors are
+ * fatal.
+ */
+void kernel_trap(struct i386_saved_state *regs)
+{
+ unsigned long code;
+ unsigned long subcode;
+ unsigned long type;
+ vm_map_t map;
+ kern_return_t result;
+ thread_t thread;
+ extern char _start[], etext[];
+
+ type = regs->trapno;
+ code = regs->err;
+ thread = current_thread();
+
+#if 0
+((short*)0xb8700)[0] = 0x0f00+'K';
+((short*)0xb8700)[1] = 0x0f30+(type / 10);
+((short*)0xb8700)[2] = 0x0f30+(type % 10);
+#endif
+#if 0
+printf("kernel trap %d error %d\n", (int) type, (int) code);
+dump_ss(regs);
+#endif
+
+ switch (type) {
+ case T_NO_FPU:
+ fpnoextflt();
+ return;
+
+ case T_FPU_FAULT:
+ fpextovrflt();
+ return;
+
+ case T_FLOATING_POINT_ERROR:
+ fpexterrflt();
+ return;
+
+ case T_PAGE_FAULT:
+
+ /* Get faulting linear address */
+ subcode = regs->cr2;
+#if 0
+ printf("kernel page fault at linear address %08x\n", subcode);
+#endif
+
+ /* If it's in the kernel linear address region,
+ convert it to a kernel virtual address
+ and use the kernel map to process the fault. */
+ if (lintokv(subcode) == 0 ||
+ subcode >= LINEAR_MIN_KERNEL_ADDRESS) {
+#if 0
+ printf("%08x in kernel linear address range\n", subcode);
+#endif
+ map = kernel_map;
+ subcode = lintokv(subcode);
+#if 0
+ printf("now %08x\n", subcode);
+#endif
+ if (trunc_page(subcode) == 0
+ || (subcode >= (long)_start
+ && subcode < (long)etext)) {
+ printf("Kernel page fault at address 0x%lx, "
+ "eip = 0x%lx\n",
+ subcode, regs->eip);
+ goto badtrap;
+ }
+ } else {
+ if (thread)
+ map = thread->task->map;
+ if (!thread || map == kernel_map) {
+ printf("kernel page fault at %08lx:\n", subcode);
+ dump_ss(regs);
+ panic("kernel thread accessed user space!\n");
+ }
+ }
+
+ /*
+ * Since the 386 ignores write protection in
+ * kernel mode, always try for write permission
+ * first. If that fails and the fault was a
+ * read fault, retry with read permission.
+ */
+ result = vm_fault(map,
+ trunc_page((vm_offset_t)subcode),
+#if !(__i486__ || __i586__ || __i686__)
+ VM_PROT_READ|VM_PROT_WRITE,
+#else
+ (code & T_PF_WRITE)
+ ? VM_PROT_READ|VM_PROT_WRITE
+ : VM_PROT_READ,
+#endif
+ FALSE,
+ FALSE,
+ (void (*)()) 0);
+#if MACH_KDB
+ if (result == KERN_SUCCESS) {
+ /* Look for watchpoints */
+ if (db_watchpoint_list &&
+ db_watchpoints_inserted &&
+ (code & T_PF_WRITE) &&
+ db_find_watchpoint(map,
+ (vm_offset_t)subcode, regs))
+ kdb_trap(T_WATCHPOINT, 0, regs);
+ }
+ else
+#endif /* MACH_KDB */
+#if !(__i486__ || __i586__ || __i686__)
+ if ((code & T_PF_WRITE) == 0 &&
+ result == KERN_PROTECTION_FAILURE)
+ {
+ /*
+ * Must expand vm_fault by hand,
+ * so that we can ask for read-only access
+ * but enter a (kernel)writable mapping.
+ */
+ result = intel_read_fault(map,
+ trunc_page((vm_offset_t)subcode));
+ }
+#else
+ ;
+#endif
+
+ if (result == KERN_SUCCESS) {
+ /*
+ * Certain faults require that we back up
+ * the EIP.
+ */
+ struct recovery *rp;
+
+ /* Linear searching; but the list is small enough. */
+ for (rp = retry_table; rp < retry_table_end; rp++) {
+ if (regs->eip == rp->fault_addr) {
+ regs->eip = rp->recover_addr;
+ break;
+ }
+ }
+ return;
+ }
+
+ /*
+ * If there is a failure recovery address
+ * for this fault, go there.
+ */
+ {
+ struct recovery *rp;
+
+ /* Linear searching; but the list is small enough. */
+ for (rp = recover_table;
+ rp < recover_table_end;
+ rp++) {
+ if (regs->eip == rp->fault_addr) {
+ regs->eip = rp->recover_addr;
+ return;
+ }
+ }
+ }
+
+ /*
+ * Check thread recovery address also -
+ * v86 assist uses it.
+ */
+ if (thread->recover) {
+ regs->eip = thread->recover;
+ thread->recover = 0;
+ return;
+ }
+
+ /*
+ * Unanticipated page-fault errors in kernel
+ * should not happen.
+ */
+ /* fall through */
+
+ default:
+ badtrap:
+ printf("Kernel ");
+ if (type < TRAP_TYPES)
+ printf("%s trap", trap_type[type]);
+ else
+ printf("trap %ld", type);
+ printf(", eip 0x%lx, code %lx, cr2 %lx\n", regs->eip, code, regs->cr2);
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, code, regs))
+ return;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (kdb_trap(type, code, regs))
+ return;
+#endif /* MACH_KDB */
+ splhigh();
+ printf("kernel trap, type %ld, code = %lx\n",
+ type, code);
+ dump_ss(regs);
+ panic("trap");
+ return;
+ }
+}
+
+
+/*
+ * Trap from user mode.
+ * Return TRUE if from emulated system call.
+ */
+int user_trap(struct i386_saved_state *regs)
+{
+ int exc = 0; /* Suppress gcc warning */
+ unsigned long code;
+ unsigned long subcode;
+ unsigned long type;
+ thread_t thread = current_thread();
+
+#ifdef __x86_64__
+ assert(regs == &thread->pcb->iss);
+#endif
+
+ type = regs->trapno;
+ code = 0;
+ subcode = 0;
+
+#if 0
+ ((short*)0xb8700)[3] = 0x0f00+'U';
+ ((short*)0xb8700)[4] = 0x0f30+(type / 10);
+ ((short*)0xb8700)[5] = 0x0f30+(type % 10);
+#endif
+#if 0
+ printf("user trap %d error %d\n", type, code);
+ dump_ss(regs);
+#endif
+
+ switch (type) {
+
+ case T_DIVIDE_ERROR:
+ exc = EXC_ARITHMETIC;
+ code = EXC_I386_DIV;
+ break;
+
+ case T_DEBUG:
+#if MACH_TTD
+ if (kttd_enabled && kttd_in_single_step()) {
+ if (kttd_trap(type, regs->err, regs))
+ return 0;
+ }
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (db_in_single_step()) {
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+ }
+#endif /* MACH_KDB */
+ /* Make the content of the debug status register (DR6)
+ available to user space. */
+ if (thread->pcb)
+ thread->pcb->ims.ids.dr[6] = get_dr6() & 0x600F;
+ set_dr6(0);
+ exc = EXC_BREAKPOINT;
+ code = EXC_I386_SGL;
+ break;
+
+ case T_INT3:
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, regs->err, regs))
+ return 0;
+ break;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ {
+ if (db_find_breakpoint_here(
+ (current_thread())? current_thread()->task: TASK_NULL,
+ regs->eip - 1)) {
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+ }
+ }
+#endif /* MACH_KDB */
+ exc = EXC_BREAKPOINT;
+ code = EXC_I386_BPT;
+ break;
+
+ case T_OVERFLOW:
+ exc = EXC_ARITHMETIC;
+ code = EXC_I386_INTO;
+ break;
+
+ case T_OUT_OF_BOUNDS:
+ exc = EXC_SOFTWARE;
+ code = EXC_I386_BOUND;
+ break;
+
+ case T_INVALID_OPCODE:
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_INVOP;
+ break;
+
+ case T_NO_FPU:
+ case 32: /* XXX */
+ fpnoextflt();
+ return 0;
+
+ case T_FPU_FAULT:
+ fpextovrflt();
+ return 0;
+
+ case 10: /* invalid TSS == iret with NT flag set */
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_INVTSSFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_SEGMENT_NOT_PRESENT:
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_SEGNPFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_STACK_FAULT:
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_STKFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_GENERAL_PROTECTION:
+ /* Check for an emulated int80 system call.
+ NetBSD-current and Linux use trap instead of call gate. */
+ if (thread->task->eml_dispatch) {
+ unsigned char opcode, intno;
+
+ opcode = inst_fetch(regs->eip, regs->cs);
+ intno = inst_fetch(regs->eip+1, regs->cs);
+ if (opcode == 0xcd && intno == 0x80) {
+ regs->eip += 2;
+ return 1;
+ }
+ }
+#ifdef __x86_64__
+ {
+ unsigned char opcode, addr[4], seg[2];
+ int i;
+
+ opcode = inst_fetch(regs->eip, regs->cs);
+ for (i = 0; i < 4; i++)
+ addr[i] = inst_fetch(regs->eip+i+1, regs->cs);
+ (void) addr;
+ for (i = 0; i < 2; i++)
+ seg[i] = inst_fetch(regs->eip+i+5, regs->cs);
+ if (opcode == 0x9a && seg[0] == 0x7 && seg[1] == 0) {
+ regs->eip += 7;
+ return 1;
+ }
+ }
+#endif
+ exc = EXC_BAD_INSTRUCTION;
+ code = EXC_I386_GPFLT;
+ subcode = regs->err & 0xffff;
+ break;
+
+ case T_PAGE_FAULT:
+ subcode = regs->cr2;
+#if 0
+ printf("user page fault at linear address %08x\n", subcode);
+ dump_ss (regs);
+
+#endif
+ if (subcode >= LINEAR_MIN_KERNEL_ADDRESS)
+ i386_exception(EXC_BAD_ACCESS, EXC_I386_PGFLT, subcode);
+ (void) vm_fault(thread->task->map,
+ trunc_page((vm_offset_t)subcode),
+ (regs->err & T_PF_WRITE)
+ ? VM_PROT_READ|VM_PROT_WRITE
+ : VM_PROT_READ,
+ FALSE,
+ FALSE,
+ user_page_fault_continue);
+ /*NOTREACHED*/
+ break;
+
+#ifdef MACH_PV_PAGETABLES
+ case 15:
+ {
+ static unsigned count = 0;
+ count++;
+ if (!(count % 10000))
+ printf("%d 4gb segments accesses\n", count);
+ if (count > 1000000) {
+ printf("A million 4gb segment accesses, stopping reporting them.");
+ if (hyp_vm_assist(VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify))
+ panic("couldn't disable 4gb segments vm assist notify");
+ }
+ return 0;
+ }
+#endif /* MACH_PV_PAGETABLES */
+
+ case T_FLOATING_POINT_ERROR:
+ fpexterrflt();
+ return 0;
+
+ default:
+#if MACH_TTD
+ if (kttd_enabled && kttd_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if (kdb_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_KDB */
+ splhigh();
+ printf("user trap, type %ld, code = %lx\n",
+ type, regs->err);
+ dump_ss(regs);
+ panic("trap");
+ return 0;
+ }
+
+#if MACH_TTD
+ if ((debug_all_traps_with_kttd || thread->task->essential) &&
+ kttd_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_TTD */
+#if MACH_KDB
+ if ((debug_all_traps_with_kdb || thread->task->essential) &&
+ kdb_trap(type, regs->err, regs))
+ return 0;
+#endif /* MACH_KDB */
+
+ i386_exception(exc, code, subcode);
+ /*NOTREACHED*/
+}
+
+#define V86_IRET_PENDING 0x4000
+
+/*
+ * Handle AST traps for i386.
+ * Check for delayed floating-point exception from
+ * AT-bus machines.
+ */
+void
+i386_astintr(void)
+{
+ (void) splsched(); /* block interrupts to check reasons */
+#ifndef MACH_RING1
+ int mycpu = cpu_number();
+
+ if (need_ast[mycpu] & AST_I386_FP) {
+ /*
+ * AST was for delayed floating-point exception -
+ * FP interrupt occurred while in kernel.
+ * Turn off this AST reason and handle the FPU error.
+ */
+ ast_off(mycpu, AST_I386_FP);
+ (void) spl0();
+
+ fpastintr();
+ }
+ else
+#endif /* MACH_RING1 */
+ {
+ /*
+ * Not an FPU trap. Handle the AST.
+ * Interrupts are still blocked.
+ */
+ ast_taken();
+ }
+}
+
+/*
+ * Handle exceptions for i386.
+ *
+ * If we are an AT bus machine, we must turn off the AST for a
+ * delayed floating-point exception.
+ *
+ * If we are providing floating-point emulation, we may have
+ * to retrieve the real register values from the floating point
+ * emulator.
+ */
+void
+i386_exception(
+ int exc,
+ int code,
+ long subcode)
+{
+ spl_t s;
+
+ /*
+ * Turn off delayed FPU error handling.
+ */
+ s = splsched();
+ ast_off(cpu_number(), AST_I386_FP);
+ splx(s);
+
+ exception(exc, code, subcode);
+ /*NOTREACHED*/
+}
+
+#if MACH_PCSAMPLE > 0
+/*
+ * return saved state for interrupted user thread
+ */
+unsigned
+interrupted_pc(const thread_t t)
+{
+ struct i386_saved_state *iss;
+
+ iss = USER_REGS(t);
+ return iss->eip;
+}
+#endif /* MACH_PCSAMPLE > 0 */
+
+#if MACH_KDB
+
+void
+db_debug_all_traps (boolean_t enable)
+{
+ debug_all_traps_with_kdb = enable;
+}
+
+#endif /* MACH_KDB */
+
+void handle_double_fault(struct i386_saved_state *regs)
+{
+ dump_ss(regs);
+ panic("DOUBLE FAULT! This is critical\n");
+}
diff --git a/riscv/riscv/trap.h b/riscv/riscv/trap.h
new file mode 100644
index 0000000..e82164d
--- /dev/null
+++ b/riscv/riscv/trap.h
@@ -0,0 +1,70 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _I386_TRAP_H_
+#define _I386_TRAP_H_
+
+#include <mach/machine/trap.h>
+
+#ifndef __ASSEMBLER__
+#include <mach/mach_types.h>
+
+char *trap_name(unsigned int trapnum);
+
+unsigned int interrupted_pc(thread_t);
+
+void
+i386_exception(
+ int exc,
+ int code,
+ long subcode) __attribute__ ((noreturn));
+
+extern void
+thread_kdb_return(void);
+
+/*
+ * Trap from kernel mode. Only page-fault errors are recoverable,
+ * and then only in special circumstances. All other errors are
+ * fatal.
+ */
+void kernel_trap(struct i386_saved_state *regs);
+
+/*
+ * Trap from user mode.
+ * Return TRUE if from emulated system call.
+ */
+int user_trap(struct i386_saved_state *regs);
+
+/*
+ * Handle AST traps for i386.
+ * Check for delayed floating-point exception from
+ * AT-bus machines.
+ */
+void i386_astintr(void);
+
+#endif /* !__ASSEMBLER__ */
+
+#endif /* _I386_TRAP_H_ */
diff --git a/riscv/riscv/vm_param.h b/riscv/riscv/vm_param.h
new file mode 100644
index 0000000..056aa52
--- /dev/null
+++ b/riscv/riscv/vm_param.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+#ifndef _I386_KERNEL_I386_VM_PARAM_
+#define _I386_KERNEL_I386_VM_PARAM_
+
+#include <kern/macros.h>
+
+/* XXX use xu/vm_param.h */
+#include <mach/vm_param.h>
+#ifdef MACH_PV_PAGETABLES
+#include <xen/public/xen.h>
+#endif
+
+/* To avoid ambiguity in kernel code, make the name explicit */
+#define VM_MIN_USER_ADDRESS VM_MIN_ADDRESS
+#define VM_MAX_USER_ADDRESS VM_MAX_ADDRESS
+
+/* The kernel address space is usually 1GB, usually starting at virtual address 0. */
+/* This can be changed freely to separate kernel addresses from user addresses
+ * for better trace support in kdb; the _START symbol has to be offset by the
+ * same amount. */
+#ifdef __x86_64__
+#define VM_MIN_KERNEL_ADDRESS KERNEL_MAP_BASE
+#else
+#define VM_MIN_KERNEL_ADDRESS 0xC0000000UL
+#endif
+
+#if defined(MACH_XEN) || defined (__x86_64__)
+/* PV kernels can be loaded directly to the target virtual address */
+#define INIT_VM_MIN_KERNEL_ADDRESS VM_MIN_KERNEL_ADDRESS
+#else /* MACH_XEN */
+/* This must remain 0 */
+#define INIT_VM_MIN_KERNEL_ADDRESS 0x00000000UL
+#endif /* MACH_XEN */
+
+#ifdef MACH_PV_PAGETABLES
+#ifdef __i386__
+#if PAE
+#define HYP_VIRT_START HYPERVISOR_VIRT_START_PAE
+#else /* PAE */
+#define HYP_VIRT_START HYPERVISOR_VIRT_START_NONPAE
+#endif /* PAE */
+#define VM_MAX_KERNEL_ADDRESS (HYP_VIRT_START - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+#else
+#define HYP_VIRT_START HYPERVISOR_VIRT_START
+#define VM_MAX_KERNEL_ADDRESS (LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+#endif
+#else /* MACH_PV_PAGETABLES */
+#define VM_MAX_KERNEL_ADDRESS (LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+#endif /* MACH_PV_PAGETABLES */
+
+/*
+ * Reserve mapping room for the kernel map, which includes
+ * the device I/O map and the IPC map.
+ */
+#ifdef __x86_64__
+/*
+ * Vm structures are quite bigger on 64 bit.
+ * This should be well enough for 8G of physical memory; on the other hand,
+ * maybe not all of them need to be in directly-mapped memory, see the parts
+ * allocated with pmap_steal_memory().
+ */
+#define VM_KERNEL_MAP_SIZE (512 * 1024 * 1024)
+#else
+#define VM_KERNEL_MAP_SIZE (152 * 1024 * 1024)
+#endif
+
+/* This is the kernel address range in linear addresses. */
+#ifdef __x86_64__
+#define LINEAR_MIN_KERNEL_ADDRESS VM_MIN_KERNEL_ADDRESS
+#define LINEAR_MAX_KERNEL_ADDRESS (0xffffffffffffffffUL)
+#else
+/* On x86, the kernel virtual address space is actually located
+ at high linear addresses. */
+#define LINEAR_MIN_KERNEL_ADDRESS (VM_MAX_USER_ADDRESS)
+#define LINEAR_MAX_KERNEL_ADDRESS (0xffffffffUL)
+#endif
+
+#ifdef MACH_PV_PAGETABLES
+/* need room for mmu updates (2*8bytes) */
+#define KERNEL_STACK_SIZE (4*I386_PGBYTES)
+#define INTSTACK_SIZE (4*I386_PGBYTES)
+#else /* MACH_PV_PAGETABLES */
+#define KERNEL_STACK_SIZE (1*I386_PGBYTES)
+#define INTSTACK_SIZE (1*I386_PGBYTES)
+#endif /* MACH_PV_PAGETABLES */
+ /* interrupt stack size */
+
+/*
+ * Conversion between 80386 pages and VM pages
+ */
+
+#define trunc_i386_to_vm(p) (atop(trunc_page(i386_ptob(p))))
+#define round_i386_to_vm(p) (atop(round_page(i386_ptob(p))))
+#define vm_to_i386(p) (i386_btop(ptoa(p)))
+
+/*
+ * Physical memory is direct-mapped to virtual memory
+ * starting at virtual address VM_MIN_KERNEL_ADDRESS.
+ */
+#define phystokv(a) ((vm_offset_t)(a) + VM_MIN_KERNEL_ADDRESS)
+/*
+ * This can not be used with virtual mappings, but can be used during bootstrap
+ */
+#define _kvtophys(a) ((vm_offset_t)(a) - VM_MIN_KERNEL_ADDRESS)
+
+/*
+ * Kernel virtual memory is actually at 0xc0000000 in linear addresses.
+ */
+#define kvtolin(a) ((vm_offset_t)(a) - VM_MIN_KERNEL_ADDRESS + LINEAR_MIN_KERNEL_ADDRESS)
+#define lintokv(a) ((vm_offset_t)(a) - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+
+/*
+ * Physical memory properties.
+ */
+#define VM_PAGE_DMA_LIMIT DECL_CONST(0x1000000, UL)
+
+#ifdef MACH_XEN
+/* TODO Completely check Xen physical/virtual layout */
+#ifdef __LP64__
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT DECL_CONST(0x400000000000, UL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#else
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT (VM_MAX_KERNEL_ADDRESS \
+ - VM_MIN_KERNEL_ADDRESS \
+ - VM_KERNEL_MAP_SIZE)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#endif
+#else /* MACH_XEN */
+#ifdef __LP64__
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT (VM_MAX_KERNEL_ADDRESS \
+ - VM_MIN_KERNEL_ADDRESS \
+ - VM_KERNEL_MAP_SIZE + 1)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, UL)
+#else /* __LP64__ */
+#define VM_PAGE_DIRECTMAP_LIMIT (VM_MAX_KERNEL_ADDRESS \
+ - VM_MIN_KERNEL_ADDRESS \
+ - VM_KERNEL_MAP_SIZE + 1)
+#ifdef PAE
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#else /* PAE */
+#define VM_PAGE_MAX_SEGS 3
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0xfffff000, UL)
+#endif /* PAE */
+#endif /* __LP64__ */
+#endif /* MACH_XEN */
+
+/*
+ * Physical segment indexes.
+ */
+#define VM_PAGE_SEG_DMA 0
+
+#if defined(VM_PAGE_DMA32_LIMIT) && (VM_PAGE_DMA32_LIMIT != VM_PAGE_DIRECTMAP_LIMIT)
+
+#if VM_PAGE_DMA32_LIMIT < VM_PAGE_DIRECTMAP_LIMIT
+#define VM_PAGE_SEG_DMA32 (VM_PAGE_SEG_DMA+1)
+#define VM_PAGE_SEG_DIRECTMAP (VM_PAGE_SEG_DMA32+1)
+#define VM_PAGE_SEG_HIGHMEM (VM_PAGE_SEG_DIRECTMAP+1)
+#else /* VM_PAGE_DMA32_LIMIT > VM_PAGE_DIRECTMAP_LIMIT */
+#define VM_PAGE_SEG_DIRECTMAP (VM_PAGE_SEG_DMA+1)
+#define VM_PAGE_SEG_DMA32 (VM_PAGE_SEG_DIRECTMAP+1)
+#define VM_PAGE_SEG_HIGHMEM (VM_PAGE_SEG_DMA32+1)
+#endif
+
+#else
+
+#define VM_PAGE_SEG_DIRECTMAP (VM_PAGE_SEG_DMA+1)
+#define VM_PAGE_SEG_DMA32 VM_PAGE_SEG_DIRECTMAP /* Alias for the DIRECTMAP segment */
+#define VM_PAGE_SEG_HIGHMEM (VM_PAGE_SEG_DIRECTMAP+1)
+#endif
+
+#endif /* _I386_KERNEL_I386_VM_PARAM_ */
diff --git a/riscv/riscv/xpr.h b/riscv/riscv/xpr.h
new file mode 100644
index 0000000..19ef026
--- /dev/null
+++ b/riscv/riscv/xpr.h
@@ -0,0 +1,32 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: xpr.h
+ *
+ * Machine dependent module for the XPR tracing facility.
+ */
+
+#define XPR_TIMESTAMP (0)