Unverified commit ee1ef10f, authored by openeuler-ci-bot, committed by Gitee

!265 Add LoongArch support

Merge Pull Request from: @Hongchen_Zhang 
 
This series of patches adds support for the LoongArch architecture.
- Support information
  New firmware + new-world system / old firmware + new-world system,
  compiled with the new compiler.
  . The corresponding new-world firmware can be downloaded from:
https://github.com/loongson/Firmware
  . The CLFS system can be used for verification; refer to the
  following links for details:
   https://github.com/sunhaiyong1978/CLFS-for-LoongArch/releases/tag/6.0
   https://github.com/sunhaiyong1978/CLFS-for-LoongArch/blob/main/CLFS_For_LoongArch64.md#8-%E5%88%9B%E5%BB%BA%E5%90%AF%E5%8A%A8u%E7%9B%98
- Patches taken from
https://github.com/loongson/linux/tree/loongarch-next, updated to 2022-09-03
- Testing
3A5000+7A1000 and 3C5000+7A1000 boot and reboot tests OK; LTP 24-hour test OK
 
 
Link: https://gitee.com/openeuler/kernel/pulls/265 
Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -24,13 +24,6 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 #define arch_efi_call_virt_setup()	efi_virtmap_load()
 #define arch_efi_call_virt_teardown()	efi_virtmap_unload()
 
-#define arch_efi_call_virt(p, f, args...)				\
-({									\
-	efi_##f##_t *__f;						\
-	__f = p->f;							\
-	__f(args);							\
-})
-
 #define ARCH_EFI_IRQ_FLAGS_MASK \
 	(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
 	 PSR_T_BIT | MODE_MASK)
......
@@ -27,12 +27,9 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 	__efi_fpsimd_begin();						\
 })
 
-#define arch_efi_call_virt(p, f, args...)				\
-({									\
-	efi_##f##_t *__f;						\
-	__f = p->f;							\
-	__efi_rt_asm_wrapper(__f, #f, args);				\
-})
+#undef arch_efi_call_virt
+#define arch_efi_call_virt(p, f, args...)				\
+	__efi_rt_asm_wrapper((p)->f, #f, args)
 
 #define arch_efi_call_virt_teardown()					\
 ({									\
......
@@ -27,9 +27,6 @@ __efistub_primary_entry_offset = primary_entry - _text;
  */
 __efistub_memcmp		= __pi_memcmp;
 __efistub_memchr		= __pi_memchr;
-__efistub_memcpy		= __pi_memcpy;
-__efistub_memmove		= __pi_memmove;
-__efistub_memset		= __pi_memset;
 __efistub_strlen		= __pi_strlen;
 __efistub_strnlen		= __pi_strnlen;
 __efistub_strcmp		= __pi_strcmp;
@@ -38,12 +35,6 @@ __efistub_strrchr = __pi_strrchr;
 __efistub_strchr		= __pi_strchr;
 __efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
 
-#ifdef CONFIG_KASAN
-__efistub___memcpy		= __pi_memcpy;
-__efistub___memmove		= __pi_memmove;
-__efistub___memset		= __pi_memset;
-#endif
-
 __efistub__text			= _text;
 __efistub__end			= _end;
 __efistub__edata		= _edata;
......
obj-y += kernel/
obj-y += mm/
obj-y += vdso/
# for cleaning
subdir- += boot
# SPDX-License-Identifier: GPL-2.0
config LOONGARCH
bool
default y
select ACPI
select ACPI_GENERIC_GSI if ACPI
select ACPI_MCFG if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_BINFMT_ELF_STATE
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SPARSEMEM_ENABLE
select ARCH_STACKWALK
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
select ARCH_WANTS_NO_INSTR
select BUILDTIME_TABLE_SORT
select COMMON_CLK
select EFI
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY
select GENERIC_IOREMAP if !ARCH_IOREMAP
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_LIB_ASHLDI3
select GENERIC_LIB_ASHRDI3
select GENERIC_LIB_CMPDI2
select GENERIC_LIB_LSHRDI3
select GENERIC_LIB_UCMPDI2
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select GPIOLIB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ASM_MODVERSIONS
select HAVE_CONTEXT_TRACKING
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD
select HAVE_FAST_GUP
select HANDLE_DOMAIN_IRQ
select HAVE_GENERIC_VDSO
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
select HAVE_SETUP_PER_CPU_AREA if NUMA
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
select MODULES_USE_ELF_RELA if MODULES
select NEED_PER_CPU_EMBED_FIRST_CHUNK
select NEED_PER_CPU_PAGE_FIRST_CHUNK
select OF
select OF_EARLY_FLATTREE
select PCI
select PCI_DOMAINS_GENERIC
select PCI_ECAM if ACPI
select PCI_LOONGSON
select PCI_MSI_ARCH_FALLBACKS
select PCI_QUIRKS
select PERF_USE_VMALLOC
select RTC_LIB
select SMP
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select SWIOTLB
select TRACE_IRQFLAGS_SUPPORT
select USER_STACKTRACE_SUPPORT
select ZONE_DMA32
config 32BIT
bool
config 64BIT
def_bool y
config USE_PERCPU_NUMA_NODE_ID
def_bool y
depends on NUMA
config CPU_HAS_FPU
bool
default y
config CPU_HAS_PREFETCH
bool
default y
config GENERIC_CALIBRATE_DELAY
def_bool y
config GENERIC_CSUM
def_bool y
config GENERIC_HWEIGHT
def_bool y
config L1_CACHE_SHIFT
int
default "6"
config LOCKDEP_SUPPORT
bool
default y
config STACKTRACE_SUPPORT
bool
default y
# MACH_LOONGSON32 and MACH_LOONGSON64 are deliberately carried over from the
# MIPS Loongson code, to preserve Loongson-specific code paths in drivers that
# are shared between architectures, and specifically expecting the symbols.
config MACH_LOONGSON32
def_bool 32BIT
config MACH_LOONGSON64
def_bool 64BIT
config FIX_EARLYCON_MEM
def_bool y
config PAGE_SIZE_4KB
bool
config PAGE_SIZE_16KB
bool
config PAGE_SIZE_64KB
bool
config PGTABLE_2LEVEL
bool
config PGTABLE_3LEVEL
bool
config PGTABLE_4LEVEL
bool
config PGTABLE_LEVELS
int
default 2 if PGTABLE_2LEVEL
default 3 if PGTABLE_3LEVEL
default 4 if PGTABLE_4LEVEL
config SCHED_OMIT_FRAME_POINTER
bool
default y
config AS_HAS_EXPLICIT_RELOCS
def_bool $(as-instr,x:pcalau12i \$t0$(comma)%pc_hi20(x))
menu "Kernel type and options"
source "kernel/Kconfig.hz"
choice
prompt "Page Table Layout"
default 16KB_2LEVEL if 32BIT
default 16KB_3LEVEL if 64BIT
help
Allows choosing the page table layout, which is a combination
of page size and page table levels. The size of the virtual memory
address space is determined by the page table layout; the arithmetic
is worked through in the sketch after this choice block.
config 4KB_3LEVEL
bool "4KB with 3 levels"
select PAGE_SIZE_4KB
select PGTABLE_3LEVEL
help
This option selects 4KB page size with 3 level page tables, which
support a maximum of 39 bits of application virtual memory.
config 4KB_4LEVEL
bool "4KB with 4 levels"
select PAGE_SIZE_4KB
select PGTABLE_4LEVEL
help
This option selects 4KB page size with 4 level page tables, which
support a maximum of 48 bits of application virtual memory.
config 16KB_2LEVEL
bool "16KB with 2 levels"
select PAGE_SIZE_16KB
select PGTABLE_2LEVEL
help
This option selects 16KB page size with 2 level page tables, which
support a maximum of 36 bits of application virtual memory.
config 16KB_3LEVEL
bool "16KB with 3 levels"
select PAGE_SIZE_16KB
select PGTABLE_3LEVEL
help
This option selects 16KB page size with 3 level page tables, which
support a maximum of 47 bits of application virtual memory.
config 64KB_2LEVEL
bool "64KB with 2 levels"
select PAGE_SIZE_64KB
select PGTABLE_2LEVEL
help
This option selects 64KB page size with 2 level page tables, which
support a maximum of 42 bits of application virtual memory.
config 64KB_3LEVEL
bool "64KB with 3 levels"
select PAGE_SIZE_64KB
select PGTABLE_3LEVEL
help
This option selects 64KB page size with 3 level page tables, which
support a maximum of 55 bits of application virtual memory.
endchoice
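The virtual-address widths quoted in the help texts above follow from one formula: with 8-byte page-table entries, each level resolves (PAGE_SHIFT - 3) bits on top of the page offset. A small illustrative sketch (not kernel code):

/* User VA bits for a page-table layout: page offset plus the bits
 * resolved per level (PAGE_SIZE / sizeof(u64) entries per level). */
static unsigned int va_bits(unsigned int page_shift, unsigned int levels)
{
	return page_shift + levels * (page_shift - 3);
}
/* va_bits(12, 3) == 39, va_bits(14, 3) == 47, va_bits(16, 3) == 55,
 * matching the 4KB_3LEVEL, 16KB_3LEVEL and 64KB_3LEVEL help texts. */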
config CMDLINE
string "Built-in kernel command line"
help
For most platforms, the arguments for the kernel's command line
are provided at run-time, during boot. However, there are cases
where either no arguments are being provided or the provided
arguments are insufficient or even invalid.
When that occurs, it is possible to define a built-in command
line here and choose how the kernel should use it later on.
choice
prompt "Kernel command line type"
default CMDLINE_BOOTLOADER
help
Choose how the kernel will handle the provided built-in command
line.
config CMDLINE_BOOTLOADER
bool "Use bootloader kernel arguments if available"
help
Prefer the command-line passed by the boot loader if available.
Use the built-in command line as fallback in case we get nothing
during boot. This is the default behaviour.
config CMDLINE_EXTEND
bool "Use built-in to extend bootloader kernel arguments"
help
The command-line arguments provided during boot will be
appended to the built-in command line. This is useful in
cases where the provided arguments are insufficient and
you don't want to or cannot modify them.
config CMDLINE_FORCE
bool "Always use the built-in kernel command string"
help
Always use the built-in command line, even if we get one during
boot. This is useful in case you need to override the provided
command line on systems where you don't have or want control
over it.
endchoice
config DMI
bool "Enable DMI scanning"
select DMI_SCAN_MACHINE_NON_EFI_FALLBACK
default y
help
This enables SMBIOS/DMI feature for systems, and scanning of
DMI to identify machine quirks.
config EFI
bool "EFI runtime service support"
select UCS2_STRING
select EFI_PARAMS_FROM_FDT
select EFI_RUNTIME_WRAPPERS
help
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).
config EFI_STUB
bool "EFI boot stub support"
default y
depends on EFI
select EFI_GENERIC_STUB
help
This kernel feature allows the kernel to be loaded directly by
EFI firmware without the use of a bootloader.
config SMP
bool "Multi-Processing support"
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
than one CPU, say Y.
If you say N here, the kernel will run on uni- and multiprocessor
machines, but will use only one CPU of a multiprocessor machine. If
you say Y here, the kernel will run on many, but not all,
uniprocessor machines. On a uniprocessor machine, the kernel
will run faster if you say N here.
See also the SMP-HOWTO available at <http://www.tldp.org/docs.html#howto>.
If you don't know what to do here, say N.
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
select GENERIC_IRQ_MIGRATION
help
Say Y here to allow turning CPUs off and on. CPUs can be
controlled through /sys/devices/system/cpu.
(Note: power management support will enable this option
automatically on SMP systems.)
Say N if you want to disable CPU hotplug.
config NR_CPUS
int "Maximum number of CPUs (2-256)"
range 2 256
depends on SMP
default "64"
help
This allows you to specify the maximum number of CPUs which this
kernel will support.
config NUMA
bool "NUMA Support"
select SMP
select ACPI_NUMA if ACPI
help
Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
support. This option improves performance on systems with more
than one NUMA node; on single node systems it is generally better
to leave it disabled.
config NODES_SHIFT
int
default "6"
depends on NUMA
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
range 14 64 if PAGE_SIZE_64KB
default "14" if PAGE_SIZE_64KB
range 12 64 if PAGE_SIZE_16KB
default "12" if PAGE_SIZE_16KB
range 11 64
default "11"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
pages. This option selects the largest power of two that the kernel
keeps in the memory allocator. If you need to allocate very large
blocks of physically contiguous memory, then you may need to
increase this value.
This config option is actually maximum order plus one. For example,
a value of 11 means that the largest free memory block is 2^10 pages.
The page size is not necessarily 4KB. Keep this in mind
when choosing a value for this option.
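To make the last paragraph concrete: the largest free block is 2^(order-1) pages, so the per-page-size defaults above all land in a comparable range. An illustrative sketch:

/* Largest allocatable block for a given maximum order and page size. */
static unsigned long max_block_bytes(unsigned int order, unsigned long page_size)
{
	return (1UL << (order - 1)) * page_size;
}
/* max_block_bytes(11,  4096) ==   4 MiB (4KB pages, default 11)
 * max_block_bytes(12, 16384) ==  32 MiB (16KB pages, default 12)
 * max_block_bytes(14, 65536) == 512 MiB (64KB pages, default 14) */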
config ARCH_IOREMAP
bool "Enable LoongArch DMW-based ioremap()"
help
We use generic TLB-based ioremap() by default since it has page
protection support. However, you can enable LoongArch DMW-based
ioremap() for better performance.
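A minimal sketch of what the DMW-based variant amounts to, assuming the TO_UNCACHE() direct-mapped-window macro from asm/addrspace.h later in this series (hypothetical helper, not the actual implementation):

/* No page tables involved: rebase the physical address into the
 * uncached direct-mapped window. */
static inline void __iomem *dmw_ioremap(phys_addr_t phys)
{
	return (void __iomem *)(unsigned long)TO_UNCACHE(phys);
}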
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
default y
help
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
execution. By using pipes or other transports made available to
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
enabled via /proc/<pid>/seccomp, it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
If unsure, say Y. Only embedded systems should say N here.
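For reference, strict mode is entered from userspace with prctl() (the /proc/<pid>/seccomp file mentioned above is the historical interface); a hedged usage sketch:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/seccomp.h>

int main(void)
{
	/* After this succeeds, only read/write/exit/sigreturn are allowed. */
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0) != 0)
		perror("prctl");
	return 0;
}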
endmenu
config ARCH_SELECT_MEMORY_MODEL
def_bool y
config ARCH_FLATMEM_ENABLE
def_bool y
depends on !NUMA
config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
help
Say Y to support efficient handling of sparse physical memory,
for architectures which are either NUMA (Non-Uniform Memory Access)
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa.rst> for more.
config ARCH_ENABLE_THP_MIGRATION
def_bool y
depends on TRANSPARENT_HUGEPAGE
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
config MMU
bool
default y
config ARCH_MMAP_RND_BITS_MIN
default 12
config ARCH_MMAP_RND_BITS_MAX
default 18
menu "Power management options"
source "drivers/acpi/Kconfig"
endmenu
source "drivers/firmware/Kconfig"
choice
prompt "Choose kernel unwinder"
default UNWINDER_PROLOGUE if KALLSYMS
help
This determines which method will be used for unwinding kernel stack
traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
lockdep, and more.
config UNWINDER_GUESS
bool "Guess unwinder"
help
This option enables the "guess" unwinder for unwinding kernel stack
traces. It scans the stack and reports every kernel text address it
finds. Some of the addresses it reports may be incorrect.
While this option often produces false positives, it can still be
useful in many cases.
config UNWINDER_PROLOGUE
bool "Prologue unwinder"
depends on KALLSYMS
help
This option enables the "prologue" unwinder for unwinding kernel stack
traces. It unwinds the stack frame based on prologue code analysis. Symbol
information is needed, at least the address and length of each function.
Some of the addresses it reports may be incorrect (but better than the
guess unwinder).
endchoice
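A hedged sketch of the "guess" strategy described above (illustrative only, not the actual unwinder):

/* Scan each stack slot and report anything that looks like a kernel
 * text address; kernel_text_address() is the existing generic helper. */
static void guess_unwind(unsigned long *sp, unsigned long *top)
{
	for (; sp < top; sp++)
		if (kernel_text_address(*sp))
			pr_info(" %pS\n", (void *)*sp);
}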
# SPDX-License-Identifier: GPL-2.0
#
# Author: Huacai Chen <chenhuacai@loongson.cn>
# Copyright (C) 2020-2022 Loongson Technology Corporation Limited
boot := arch/loongarch/boot
KBUILD_DEFCONFIG := loongson3_defconfig
ifndef CONFIG_EFI_STUB
KBUILD_IMAGE := $(boot)/vmlinux.elf
else
KBUILD_IMAGE := $(boot)/vmlinux.efi
endif
#
# Select the object file format to substitute into the linker script.
#
64bit-tool-archpref = loongarch64
32bit-bfd = elf32-loongarch
64bit-bfd = elf64-loongarch
32bit-emul = elf32loongarch
64bit-emul = elf64loongarch
ifdef CONFIG_64BIT
tool-archpref = $(64bit-tool-archpref)
UTS_MACHINE := loongarch64
endif
ifneq ($(SUBARCH),$(ARCH))
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, $(tool-archpref)-linux- $(tool-archpref)-linux-gnu- $(tool-archpref)-unknown-linux-gnu-)
endif
endif
ifdef CONFIG_64BIT
ld-emul = $(64bit-emul)
cflags-y += -mabi=lp64s
endif
cflags-y += -G0 -pipe -msoft-float
LDFLAGS_vmlinux += -G0 -static -n -nostdlib
# When the assembler supports explicit relocation hints, we must use them.
# GCC may have -mexplicit-relocs off by default if it was built with an old
# assembler, so we force it via an option.
#
# When the assembler does not support explicit relocation hints, we can't use
# them. Disable the option if the compiler supports it.
#
# If you've seen "unknown reloc hint" message building the kernel and you are
# now wondering why "-mexplicit-relocs" is not wrapped with cc-option: the
# combination of a "new" assembler and "old" compiler is not supported. Either
# upgrade the compiler or downgrade the assembler.
ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
cflags-y += -mexplicit-relocs
else
cflags-y += $(call cc-option,-mno-explicit-relocs)
KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
KBUILD_CFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
endif
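For illustration, with -mexplicit-relocs the compiler itself emits the relocation operators probed by the AS_HAS_EXPLICIT_RELOCS test (rather than leaving la.global/la.local expansion to the assembler). A hedged sketch with a hypothetical symbol foo:

static inline unsigned long pcrel_address_of_foo(void)
{
	unsigned long p;

	/* The %pc_hi20/%pc_lo12 pair is what the option makes explicit. */
	asm("pcalau12i %0, %%pc_hi20(foo)\n\t"
	    "addi.d    %0, %0, %%pc_lo12(foo)" : "=r" (p));
	return p;
}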
cflags-y += -ffreestanding
cflags-y += $(call cc-option, -mno-check-zero-division)
load-y = 0x9000000000200000
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
drivers-$(CONFIG_PCI) += arch/loongarch/pci/
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
# This is required to get dwarf unwinding tables into .debug_frame
# instead of .eh_frame so we don't discard them.
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Don't emit unaligned accesses.
# Not all LoongArch cores support unaligned access, and as kernel we can't
# rely on others to provide emulation for these accesses.
KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
KBUILD_CFLAGS += -isystem $(shell $(CC) -print-file-name=include)
KBUILD_LDFLAGS += -m $(ld-emul)
ifdef CONFIG_LOONGARCH
CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
endif
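The sed pipeline above just rewrites the compiler's predefined macros into sparse -D options: an input line of the form "#define __loongarch64 1" comes out as -D'__loongarch64'='1' in CHECKFLAGS (the GNUC version macros are filtered out first by the egrep).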
head-y := arch/loongarch/kernel/head.o
core-y += arch/loongarch/
libs-y += arch/loongarch/lib/
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
ifeq ($(KBUILD_EXTMOD),)
prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/loongarch/vdso include/generated/vdso-offsets.h
endif
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/loongarch/vdso $@
all: $(notdir $(KBUILD_IMAGE))
vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@
install:
$(Q)install -D -m 755 $(KBUILD_IMAGE) $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
$(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
$(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
define archhelp
echo ' install - install kernel into $(INSTALL_PATH)'
echo
endef
# SPDX-License-Identifier: GPL-2.0-only
vmlinux*
#
# arch/loongarch/boot/Makefile
#
# Copyright (C) 2020-2022 Loongson Technology Corporation Limited
#
drop-sections := .comment .note .options .note.gnu.build-id
strip-flags := $(addprefix --remove-section=,$(drop-sections)) -S
OBJCOPYFLAGS_vmlinux.efi := -O binary $(strip-flags)
quiet_cmd_strip = STRIP $@
cmd_strip = $(STRIP) -s -o $@ $<
targets := vmlinux.elf
$(obj)/vmlinux.elf: vmlinux FORCE
$(call if_changed,strip)
targets += vmlinux.efi
$(obj)/vmlinux.efi: vmlinux FORCE
$(call if_changed,objcopy)
EFI_ZBOOT_PAYLOAD := vmlinux.efi
EFI_ZBOOT_BFD_TARGET := elf64-loongarch
EFI_ZBOOT_MACH_TYPE := LOONGARCH64
include $(srctree)/drivers/firmware/efi/libstub/Makefile.zboot
# SPDX-License-Identifier: GPL-2.0-only
dtstree := $(srctree)/$(src)
dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_HOTPLUG_CPU=y
CONFIG_NUMA=y
CONFIG_ACPI_SPCR_TABLE=y
CONFIG_ACPI_DOCK=y
CONFIG_ACPI_IPMI=m
CONFIG_ACPI_PCI_SLOT=y
CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EFI_TEST=m
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_ZPOOL=y
CONFIG_ZBUD=y
CONFIG_Z3FOLD=y
CONFIG_ZSMALLOC=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_IP_MROUTE=y
CONFIG_INET_ESP=m
CONFIG_INET_UDP_DIAG=y
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_BBR=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_MROUTE=y
CONFIG_NETWORK_PHY_TIMESTAMPING=y
CONFIG_NETFILTER=y
CONFIG_BRIDGE_NETFILTER=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_ZONES=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_CT_NETLINK_HELPER=m
CONFIG_NETFILTER_NETLINK_GLUE_CT=y
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
CONFIG_NF_TABLES_NETDEV=y
CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_CONNLIMIT=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_SOCKET=m
CONFIG_NFT_OSF=m
CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
CONFIG_NETFILTER_XT_TARGET_HMARK=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_LED=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CGROUP=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_CPU=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_IP_SET=m
CONFIG_IP_VS=m
CONFIG_IP_VS_IPV6=y
CONFIG_IP_VS_PROTO_TCP=y
CONFIG_IP_VS_PROTO_UDP=y
CONFIG_IP_VS_RR=m
CONFIG_IP_VS_NFCT=y
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_LOG_ARP=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_SYNPROXY=m
CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_MATCH_SRH=m
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_TARGET_SYNPROXY=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
CONFIG_BRIDGE_EBT_T_NAT=m
CONFIG_BRIDGE_EBT_ARP=m
CONFIG_BRIDGE_EBT_IP=m
CONFIG_BRIDGE_EBT_IP6=m
CONFIG_BPFILTER=y
CONFIG_IP_SCTP=m
CONFIG_RDS=y
CONFIG_L2TP=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_VLAN_8021Q_MVRP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
CONFIG_NET_CLS_CGROUP=m
CONFIG_NET_CLS_BPF=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_BPF=m
CONFIG_DCB=y
CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=y
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BT=m
CONFIG_BT_HCIBTUSB=m
# CONFIG_BT_HCIBTUSB_BCM is not set
CONFIG_CFG80211=m
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
CONFIG_RFKILL=m
CONFIG_RFKILL_INPUT=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_PCIEPORTBUS=y
CONFIG_HOTPLUG_PCI_PCIE=y
CONFIG_PCIEAER=y
# CONFIG_PCIEASPM is not set
CONFIG_PCI_IOV=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_SHPC=y
CONFIG_PCCARD=m
CONFIG_YENTA=m
CONFIG_RAPIDIO=y
CONFIG_RAPIDIO_TSI721=y
CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
CONFIG_RAPIDIO_ENUM_BASIC=m
CONFIG_RAPIDIO_CHMAN=m
CONFIG_RAPIDIO_MPORT_CDEV=m
CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_MTD=m
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=m
CONFIG_MTD_JEDECPROBE=m
CONFIG_MTD_CFI_INTELEXT=m
CONFIG_MTD_CFI_AMDSTD=m
CONFIG_MTD_CFI_STAA=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_SERIAL=y
CONFIG_PARPORT_PC_FIFO=y
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_RBD=m
CONFIG_BLK_DEV_NVME=y
CONFIG_NVME_MULTIPATH=y
CONFIG_NVME_RDMA=m
CONFIG_NVME_FC=m
CONFIG_NVME_TCP=m
CONFIG_NVME_TARGET=m
CONFIG_NVME_TARGET_PASSTHRU=y
CONFIG_NVME_TARGET_LOOP=m
CONFIG_NVME_TARGET_RDMA=m
CONFIG_NVME_TARGET_FC=m
CONFIG_NVME_TARGET_TCP=m
CONFIG_EEPROM_AT24=m
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=m
CONFIG_SCSI_FC_ATTRS=m
CONFIG_SCSI_SAS_ATA=y
CONFIG_ISCSI_TCP=m
CONFIG_SCSI_MVSAS=y
# CONFIG_SCSI_MVSAS_DEBUG is not set
CONFIG_SCSI_MVSAS_TASKLET=y
CONFIG_SCSI_MVUMI=y
CONFIG_MEGARAID_NEWGEN=y
CONFIG_MEGARAID_MM=y
CONFIG_MEGARAID_MAILBOX=y
CONFIG_MEGARAID_LEGACY=y
CONFIG_MEGARAID_SAS=y
CONFIG_SCSI_MPT2SAS=y
CONFIG_LIBFC=m
CONFIG_LIBFCOE=m
CONFIG_FCOE=m
CONFIG_SCSI_QLOGIC_1280=m
CONFIG_SCSI_QLA_FC=m
CONFIG_TCM_QLA2XXX=m
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_VIRTIO=m
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_PATA_ATIIXP=y
CONFIG_PATA_PCMCIA=m
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_MD_MULTIPATH=m
CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_CACHE=m
CONFIG_DM_WRITECACHE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
CONFIG_TCM_PSCSI=m
CONFIG_TCM_USER2=m
CONFIG_LOOPBACK_TARGET=m
CONFIG_ISCSI_TARGET=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=y
CONFIG_WIREGUARD=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
CONFIG_VXLAN=y
CONFIG_RIONET=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_AGERE is not set
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_ALTEON is not set
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
CONFIG_BNX2=y
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CAVIUM is not set
CONFIG_CHELSIO_T1=m
CONFIG_CHELSIO_T1_1G=y
CONFIG_CHELSIO_T3=m
CONFIG_CHELSIO_T4=m
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_I825XX is not set
CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_IGB=y
CONFIG_IXGB=y
CONFIG_IXGBE=y
CONFIG_TXGBE=m
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RDC is not set
CONFIG_8139CP=m
CONFIG_8139TOO=m
CONFIG_R8169=y
# CONFIG_NET_VENDOR_RENESAS is not set
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SMSC is not set
CONFIG_STMMAC_ETH=y
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_USB_RTL8150=m
CONFIG_USB_RTL8152=m
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_AX88179_178A is not set
CONFIG_USB_NET_CDC_EEM=m
CONFIG_USB_NET_HUAWEI_CDC_NCM=m
CONFIG_USB_NET_CDC_MBIM=m
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_BELKIN is not set
# CONFIG_USB_ARMLINUX is not set
# CONFIG_USB_NET_ZAURUS is not set
CONFIG_ATH9K=m
CONFIG_ATH9K_HTC=m
CONFIG_IWLWIFI=m
CONFIG_IWLDVM=m
CONFIG_IWLMVM=m
CONFIG_IWLWIFI_BCAST_FILTERING=y
CONFIG_HOSTAP=m
CONFIG_MT7601U=m
CONFIG_RT2X00=m
CONFIG_RT2800USB=m
CONFIG_RTL8192CE=m
CONFIG_RTL8192SE=m
CONFIG_RTL8192DE=m
CONFIG_RTL8723AE=m
CONFIG_RTL8723BE=m
CONFIG_RTL8188EE=m
CONFIG_RTL8192EE=m
CONFIG_RTL8821AE=m
CONFIG_RTL8192CU=m
# CONFIG_RTLWIFI_DEBUG is not set
CONFIG_RTL8XXXU=m
CONFIG_ZD1211RW=m
CONFIG_USB_NET_RNDIS_WLAN=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_MOUSEDEV_PSAUX=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_XTKBD=m
CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_MOUSE_PS2_SENTELIC=y
CONFIG_MOUSE_SERIAL=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=m
CONFIG_SERIO_SERPORT=m
CONFIG_SERIO_RAW=m
CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=16
CONFIG_SERIAL_8250_RUNTIME_UARTS=16
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_PRINTER=m
CONFIG_VIRTIO_CONSOLE=y
CONFIG_IPMI_HANDLER=m
CONFIG_IPMI_DEVICE_INTERFACE=m
CONFIG_IPMI_SI=m
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PIIX4=y
CONFIG_I2C_GPIO=y
CONFIG_SPI=y
CONFIG_GPIO_SYSFS=y
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM93=m
CONFIG_SENSORS_W83795=m
CONFIG_SENSORS_W83627HF=m
CONFIG_RC_CORE=m
CONFIG_LIRC=y
CONFIG_RC_DECODERS=y
CONFIG_IR_NEC_DECODER=m
CONFIG_IR_RC5_DECODER=m
CONFIG_IR_RC6_DECODER=m
CONFIG_IR_JVC_DECODER=m
CONFIG_IR_SONY_DECODER=m
CONFIG_IR_SANYO_DECODER=m
CONFIG_IR_SHARP_DECODER=m
CONFIG_IR_MCE_KBD_DECODER=m
CONFIG_IR_XMP_DECODER=m
CONFIG_IR_IMON_DECODER=m
CONFIG_MEDIA_SUPPORT=m
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_MEDIA_PCI_SUPPORT=y
CONFIG_VIDEO_BT848=m
CONFIG_DVB_BT8XX=m
CONFIG_DRM=y
CONFIG_DRM_RADEON=m
CONFIG_DRM_RADEON_USERPTR=y
CONFIG_DRM_AMDGPU=m
CONFIG_DRM_AMDGPU_SI=y
CONFIG_DRM_AMDGPU_CIK=y
CONFIG_DRM_AMDGPU_USERPTR=y
CONFIG_DRM_AST=y
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_BT87X=m
CONFIG_SND_BT87X_OVERCLOCK=y
CONFIG_SND_HDA_INTEL=y
CONFIG_SND_HDA_HWDEP=y
CONFIG_SND_HDA_INPUT_BEEP=y
CONFIG_SND_HDA_PATCH_LOADER=y
CONFIG_SND_HDA_CODEC_REALTEK=y
CONFIG_SND_HDA_CODEC_SIGMATEL=y
CONFIG_SND_HDA_CODEC_HDMI=y
CONFIG_SND_HDA_CODEC_CONEXANT=y
CONFIG_SND_USB_AUDIO=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
CONFIG_HID_A4TECH=m
CONFIG_HID_CHERRY=m
CONFIG_HID_LOGITECH=m
CONFIG_HID_LOGITECH_DJ=m
CONFIG_LOGITECH_FF=y
CONFIG_LOGIRUMBLEPAD2_FF=y
CONFIG_LOGIG940_FF=y
CONFIG_HID_MICROSOFT=m
CONFIG_HID_MULTITOUCH=m
CONFIG_HID_SUNPLUS=m
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_MON=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_UHCI_HCD=m
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=m
CONFIG_USB_STORAGE=m
CONFIG_USB_STORAGE_REALTEK=m
CONFIG_USB_UAS=m
CONFIG_USB_DWC2=y
CONFIG_USB_DWC2_HOST=y
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_CH341=m
CONFIG_USB_SERIAL_CP210X=m
CONFIG_USB_SERIAL_FTDI_SIO=m
CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_GADGET=y
CONFIG_INFINIBAND=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_DMADEVICES=y
CONFIG_UIO=m
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_UIO_DMEM_GENIRQ=m
CONFIG_UIO_PCI_GENERIC=m
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=m
CONFIG_VIRTIO_MMIO=m
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_SCSI=m
CONFIG_VHOST_VSOCK=m
CONFIG_STAGING=y
CONFIG_COMEDI=m
CONFIG_COMEDI_PCI_DRIVERS=m
CONFIG_COMEDI_8255_PCI=m
CONFIG_COMEDI_ADL_PCI6208=m
CONFIG_COMEDI_ADL_PCI7X3X=m
CONFIG_COMEDI_ADL_PCI8164=m
CONFIG_COMEDI_ADL_PCI9111=m
CONFIG_COMEDI_ADL_PCI9118=m
CONFIG_COMEDI_ADV_PCI1710=m
CONFIG_COMEDI_ADV_PCI1720=m
CONFIG_COMEDI_ADV_PCI1723=m
CONFIG_COMEDI_ADV_PCI1724=m
CONFIG_COMEDI_ADV_PCI1760=m
CONFIG_COMEDI_ADV_PCI_DIO=m
CONFIG_COMEDI_NI_LABPC_PCI=m
CONFIG_COMEDI_NI_PCIDIO=m
CONFIG_COMEDI_NI_PCIMIO=m
CONFIG_R8188EU=m
# CONFIG_88EU_AP_MODE is not set
CONFIG_PM_DEVFREQ=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_PWM=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_BTRFS_FS=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=y
CONFIG_OVERLAY_FS_INDEX=y
CONFIG_OVERLAY_FS_XINO_AUTO=y
CONFIG_OVERLAY_FS_METACOPY=y
CONFIG_FSCACHE=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=y
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_CODEPAGE=936
CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CONFIGFS_FS=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_BLOCKLAYOUT=y
CONFIG_CIFS=m
# CONFIG_CIFS_DEBUG is not set
CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_936=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_UTF8=y
CONFIG_KEY_DH_OPERATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_SECURITY_APPARMOR=y
CONFIG_SECURITY_YAMA=y
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_PRINTK_TIME=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
# SPDX-License-Identifier: GPL-2.0
generic-y += dma-contiguous.h
generic-y += export.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += user.h
generic-y += stat.h
generic-y += fcntl.h
generic-y += ioctl.h
generic-y += ioctls.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += sembuf.h
generic-y += shmbuf.h
generic-y += statfs.h
generic-y += socket.h
generic-y += sockios.h
generic-y += termios.h
generic-y += termbits.h
generic-y += poll.h
generic-y += param.h
generic-y += posix_types.h
generic-y += resource.h
generic-y += kvm_para.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
* LoongArch specific ACPICA environments and implementation
*
* Author: Jianmin Lv <lvjianmin@loongson.cn>
* Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_LOONGARCH_ACENV_H
#define _ASM_LOONGARCH_ACENV_H
/*
* This header is required by ACPI core, but we have nothing to fill in
* right now. Will be updated later when needed.
*/
#endif /* _ASM_LOONGARCH_ACENV_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Author: Jianmin Lv <lvjianmin@loongson.cn>
* Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_LOONGARCH_ACPI_H
#define _ASM_LOONGARCH_ACPI_H
#ifdef CONFIG_ACPI
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_noirq;
#define acpi_os_ioremap acpi_os_ioremap
void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
static inline void disable_acpi(void)
{
acpi_disabled = 1;
acpi_pci_disabled = 1;
acpi_noirq = 1;
}
static inline bool acpi_has_cpu_in_madt(void)
{
return true;
}
extern struct list_head acpi_wakeup_device_list;
#endif /* !CONFIG_ACPI */
#define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT
#endif /* _ASM_LOONGARCH_ACPI_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1996, 99 Ralf Baechle
* Copyright (C) 2000, 2002 Maciej W. Rozycki
* Copyright (C) 1990, 1999 by Silicon Graphics, Inc.
*/
#ifndef _ASM_ADDRSPACE_H
#define _ASM_ADDRSPACE_H
#include <linux/const.h>
#include <asm/loongarch.h>
/*
* This gives the physical RAM offset.
*/
#ifndef __ASSEMBLY__
#ifndef PHYS_OFFSET
#define PHYS_OFFSET _AC(0, UL)
#endif
extern unsigned long vm_map_base;
#endif /* __ASSEMBLY__ */
#ifndef IO_BASE
#define IO_BASE CSR_DMW0_BASE
#endif
#ifndef CACHE_BASE
#define CACHE_BASE CSR_DMW1_BASE
#endif
#ifndef UNCACHE_BASE
#define UNCACHE_BASE CSR_DMW0_BASE
#endif
#define DMW_PABITS 48
#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1)
/*
* Memory above this physical address will be considered highmem.
*/
#ifndef HIGHMEM_START
#define HIGHMEM_START (_AC(1, UL) << _AC(DMW_PABITS, UL))
#endif
#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
#define TO_CACHE(x) (CACHE_BASE | ((x) & TO_PHYS_MASK))
#define TO_UNCACHE(x) (UNCACHE_BASE | ((x) & TO_PHYS_MASK))
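A worked example of the three macros above, assuming the DMW configuration this series programs (CSR_DMW0_BASE == 0x8000000000000000 for the uncached window, CSR_DMW1_BASE == 0x9000000000000000 for the cached one; the exact constants are an assumption here):

/* Sketch (constants assumed as above):
 *	TO_CACHE(0x200000)		== 0x9000000000200000 (the load-y
 *					   address in the arch Makefile above)
 *	TO_UNCACHE(0x200000)		== 0x8000000000200000
 *	TO_PHYS(0x9000000000200000)	== 0x200000
 */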
/*
* This handles the memory map.
*/
#ifndef PAGE_OFFSET
#define PAGE_OFFSET (CACHE_BASE + PHYS_OFFSET)
#endif
#ifndef FIXADDR_TOP
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
#endif
#ifdef __ASSEMBLY__
#define _ATYPE_
#define _ATYPE32_
#define _ATYPE64_
#define _CONST64_(x) x
#else
#define _ATYPE_ __PTRDIFF_TYPE__
#define _ATYPE32_ int
#define _ATYPE64_ __s64
#ifdef CONFIG_64BIT
#define _CONST64_(x) x ## L
#else
#define _CONST64_(x) x ## LL
#endif
#endif
/*
* 32/64-bit LoongArch address spaces
*/
#ifdef __ASSEMBLY__
#define _ACAST32_
#define _ACAST64_
#else
#define _ACAST32_ (_ATYPE_)(_ATYPE32_) /* widen if necessary */
#define _ACAST64_ (_ATYPE64_) /* do _not_ narrow */
#endif
#ifdef CONFIG_32BIT
#define UVRANGE 0x00000000
#define KPRANGE0 0x80000000
#define KPRANGE1 0xa0000000
#define KVRANGE 0xc0000000
#else
#define XUVRANGE _CONST64_(0x0000000000000000)
#define XSPRANGE _CONST64_(0x4000000000000000)
#define XKPRANGE _CONST64_(0x8000000000000000)
#define XKVRANGE _CONST64_(0xc000000000000000)
#endif
/*
* Returns the physical address of a KPRANGEx / XKPRANGE address
*/
#define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK)
/*
* On LoongArch, the I/O port mapping is as follows:
*
* | .... |
* |-----------------------|
* | pci io ports(16K~32M) |
* |-----------------------|
* | isa io ports(0 ~16K) |
* PCI_IOBASE ->|-----------------------|
* | .... |
*/
#define PCI_IOBASE ((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
#define PCI_IOSIZE SZ_32M
#define ISA_IOSIZE SZ_16K
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
#define ISA_PHY_IOBASE LOONGSON_LIO_BASE
#endif /* _ASM_ADDRSPACE_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <generated/asm-offsets.h>
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/uaccess.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/ftrace.h>
#include <asm-generic/asm-prototypes.h>
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Some useful macros for LoongArch assembler code
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle
* Copyright (C) 1999 by Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2002 Maciej W. Rozycki
*/
#ifndef __ASM_ASM_H
#define __ASM_ASM_H
/* LoongArch pref instruction. */
#ifdef CONFIG_CPU_HAS_PREFETCH
#define PREF(hint, addr, offs) \
		preld	hint, addr, offs;

#define PREFX(hint, addr, index) \
		preldx	hint, addr, index;
#else /* !CONFIG_CPU_HAS_PREFETCH */
#define PREF(hint, addr, offs)
#define PREFX(hint, addr, index)
#endif /* !CONFIG_CPU_HAS_PREFETCH */
/*
* Stack alignment
*/
#define STACK_ALIGN ~(0xf)
/*
* Macros to handle different pointer/register sizes for 32/64-bit code
*/
/*
* Size of a register
*/
#ifndef __loongarch64
#define SZREG 4
#else
#define SZREG 8
#endif
/*
* Use the following macros in assembler code to load/store registers,
* pointers etc.
*/
#if (SZREG == 4)
#define REG_L ld.w
#define REG_S st.w
#define REG_ADD add.w
#define REG_SUB sub.w
#else /* SZREG == 8 */
#define REG_L ld.d
#define REG_S st.d
#define REG_ADD add.d
#define REG_SUB sub.d
#endif
/*
* How to add/sub/load/store/shift C int variables.
*/
#if (__SIZEOF_INT__ == 4)
#define INT_ADD add.w
#define INT_ADDI addi.w
#define INT_SUB sub.w
#define INT_L ld.w
#define INT_S st.w
#define INT_SLL slli.w
#define INT_SLLV sll.w
#define INT_SRL srli.w
#define INT_SRLV srl.w
#define INT_SRA srai.w
#define INT_SRAV sra.w
#endif
#if (__SIZEOF_INT__ == 8)
#define INT_ADD add.d
#define INT_ADDI addi.d
#define INT_SUB sub.d
#define INT_L ld.d
#define INT_S st.d
#define INT_SLL slli.d
#define INT_SLLV sll.d
#define INT_SRL srli.d
#define INT_SRLV srl.d
#define INT_SRA srai.d
#define INT_SRAV sra.d
#endif
/*
* How to add/sub/load/store/shift C long variables.
*/
#if (__SIZEOF_LONG__ == 4)
#define LONG_ADD add.w
#define LONG_ADDI addi.w
#define LONG_SUB sub.w
#define LONG_L ld.w
#define LONG_S st.w
#define LONG_SLL slli.w
#define LONG_SLLV sll.w
#define LONG_SRL srli.w
#define LONG_SRLV srl.w
#define LONG_SRA srai.w
#define LONG_SRAV sra.w
#ifdef __ASSEMBLY__
#define LONG .word
#endif
#define LONGSIZE 4
#define LONGMASK 3
#define LONGLOG 2
#endif
#if (__SIZEOF_LONG__ == 8)
#define LONG_ADD add.d
#define LONG_ADDI addi.d
#define LONG_SUB sub.d
#define LONG_L ld.d
#define LONG_S st.d
#define LONG_SLL slli.d
#define LONG_SLLV sll.d
#define LONG_SRL srli.d
#define LONG_SRLV srl.d
#define LONG_SRA srai.d
#define LONG_SRAV sra.d
#ifdef __ASSEMBLY__
#define LONG .dword
#endif
#define LONGSIZE 8
#define LONGMASK 7
#define LONGLOG 3
#endif
/*
* How to add/sub/load/store/shift pointers.
*/
#if (__SIZEOF_POINTER__ == 4)
#define PTR_ADD add.w
#define PTR_ADDI addi.w
#define PTR_SUB sub.w
#define PTR_L ld.w
#define PTR_S st.w
#define PTR_LI li.w
#define PTR_SLL slli.w
#define PTR_SLLV sll.w
#define PTR_SRL srli.w
#define PTR_SRLV srl.w
#define PTR_SRA srai.w
#define PTR_SRAV sra.w
#define PTR_SCALESHIFT 2
#ifdef __ASSEMBLY__
#define PTR .word
#endif
#define PTRSIZE 4
#define PTRLOG 2
#endif
#if (__SIZEOF_POINTER__ == 8)
#define PTR_ADD add.d
#define PTR_ADDI addi.d
#define PTR_SUB sub.d
#define PTR_L ld.d
#define PTR_S st.d
#define PTR_LI li.d
#define PTR_SLL slli.d
#define PTR_SLLV sll.d
#define PTR_SRL srli.d
#define PTR_SRLV srl.d
#define PTR_SRA srai.d
#define PTR_SRAV sra.d
#define PTR_SCALESHIFT 3
#ifdef __ASSEMBLY__
#define PTR .dword
#endif
#define PTRSIZE 8
#define PTRLOG 3
#endif
#endif /* __ASM_ASM_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_ASMMACRO_H
#define _ASM_ASMMACRO_H
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/loongarch.h>
.macro parse_v var val
\var = \val
.endm
.macro parse_r var r
\var = -1
.ifc \r, $r0
\var = 0
.endif
.ifc \r, $r1
\var = 1
.endif
.ifc \r, $r2
\var = 2
.endif
.ifc \r, $r3
\var = 3
.endif
.ifc \r, $r4
\var = 4
.endif
.ifc \r, $r5
\var = 5
.endif
.ifc \r, $r6
\var = 6
.endif
.ifc \r, $r7
\var = 7
.endif
.ifc \r, $r8
\var = 8
.endif
.ifc \r, $r9
\var = 9
.endif
.ifc \r, $r10
\var = 10
.endif
.ifc \r, $r11
\var = 11
.endif
.ifc \r, $r12
\var = 12
.endif
.ifc \r, $r13
\var = 13
.endif
.ifc \r, $r14
\var = 14
.endif
.ifc \r, $r15
\var = 15
.endif
.ifc \r, $r16
\var = 16
.endif
.ifc \r, $r17
\var = 17
.endif
.ifc \r, $r18
\var = 18
.endif
.ifc \r, $r19
\var = 19
.endif
.ifc \r, $r20
\var = 20
.endif
.ifc \r, $r21
\var = 21
.endif
.ifc \r, $r22
\var = 22
.endif
.ifc \r, $r23
\var = 23
.endif
.ifc \r, $r24
\var = 24
.endif
.ifc \r, $r25
\var = 25
.endif
.ifc \r, $r26
\var = 26
.endif
.ifc \r, $r27
\var = 27
.endif
.ifc \r, $r28
\var = 28
.endif
.ifc \r, $r29
\var = 29
.endif
.ifc \r, $r30
\var = 30
.endif
.ifc \r, $r31
\var = 31
.endif
.iflt \var
.error "Unable to parse register name \r"
.endif
.endm
.macro cpu_save_nonscratch thread
stptr.d s0, \thread, THREAD_REG23
stptr.d s1, \thread, THREAD_REG24
stptr.d s2, \thread, THREAD_REG25
stptr.d s3, \thread, THREAD_REG26
stptr.d s4, \thread, THREAD_REG27
stptr.d s5, \thread, THREAD_REG28
stptr.d s6, \thread, THREAD_REG29
stptr.d s7, \thread, THREAD_REG30
stptr.d s8, \thread, THREAD_REG31
stptr.d sp, \thread, THREAD_REG03
stptr.d fp, \thread, THREAD_REG22
.endm
.macro cpu_restore_nonscratch thread
ldptr.d s0, \thread, THREAD_REG23
ldptr.d s1, \thread, THREAD_REG24
ldptr.d s2, \thread, THREAD_REG25
ldptr.d s3, \thread, THREAD_REG26
ldptr.d s4, \thread, THREAD_REG27
ldptr.d s5, \thread, THREAD_REG28
ldptr.d s6, \thread, THREAD_REG29
ldptr.d s7, \thread, THREAD_REG30
ldptr.d s8, \thread, THREAD_REG31
ldptr.d ra, \thread, THREAD_REG01
ldptr.d sp, \thread, THREAD_REG03
ldptr.d fp, \thread, THREAD_REG22
.endm
.macro fpu_save_csr thread tmp
movfcsr2gr \tmp, fcsr0
stptr.w \tmp, \thread, THREAD_FCSR
.endm
.macro fpu_restore_csr thread tmp
ldptr.w \tmp, \thread, THREAD_FCSR
movgr2fcsr fcsr0, \tmp
.endm
.macro fpu_save_cc thread tmp0 tmp1
movcf2gr \tmp0, $fcc0
move \tmp1, \tmp0
movcf2gr \tmp0, $fcc1
bstrins.d \tmp1, \tmp0, 15, 8
movcf2gr \tmp0, $fcc2
bstrins.d \tmp1, \tmp0, 23, 16
movcf2gr \tmp0, $fcc3
bstrins.d \tmp1, \tmp0, 31, 24
movcf2gr \tmp0, $fcc4
bstrins.d \tmp1, \tmp0, 39, 32
movcf2gr \tmp0, $fcc5
bstrins.d \tmp1, \tmp0, 47, 40
movcf2gr \tmp0, $fcc6
bstrins.d \tmp1, \tmp0, 55, 48
movcf2gr \tmp0, $fcc7
bstrins.d \tmp1, \tmp0, 63, 56
stptr.d \tmp1, \thread, THREAD_FCC
.endm
.macro fpu_restore_cc thread tmp0 tmp1
ldptr.d \tmp0, \thread, THREAD_FCC
bstrpick.d \tmp1, \tmp0, 7, 0
movgr2cf $fcc0, \tmp1
bstrpick.d \tmp1, \tmp0, 15, 8
movgr2cf $fcc1, \tmp1
bstrpick.d \tmp1, \tmp0, 23, 16
movgr2cf $fcc2, \tmp1
bstrpick.d \tmp1, \tmp0, 31, 24
movgr2cf $fcc3, \tmp1
bstrpick.d \tmp1, \tmp0, 39, 32
movgr2cf $fcc4, \tmp1
bstrpick.d \tmp1, \tmp0, 47, 40
movgr2cf $fcc5, \tmp1
bstrpick.d \tmp1, \tmp0, 55, 48
movgr2cf $fcc6, \tmp1
bstrpick.d \tmp1, \tmp0, 63, 56
movgr2cf $fcc7, \tmp1
.endm
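In C terms, the two macros above pack/unpack the eight condition-flag registers into the byte lanes of one doubleword; a sketch, with read_fcc() as a hypothetical stand-in for movcf2gr:

static inline u64 pack_fcc(void)
{
	u64 fcc = 0;
	int i;

	for (i = 0; i < 8; i++)		/* bstrins.d lane: bits 8i+7..8i */
		fcc |= (u64)read_fcc(i) << (8 * i);
	return fcc;
}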
.macro fpu_save_double thread tmp
li.w \tmp, THREAD_FPR0
PTR_ADD \tmp, \tmp, \thread
fst.d $f0, \tmp, THREAD_FPR0 - THREAD_FPR0
fst.d $f1, \tmp, THREAD_FPR1 - THREAD_FPR0
fst.d $f2, \tmp, THREAD_FPR2 - THREAD_FPR0
fst.d $f3, \tmp, THREAD_FPR3 - THREAD_FPR0
fst.d $f4, \tmp, THREAD_FPR4 - THREAD_FPR0
fst.d $f5, \tmp, THREAD_FPR5 - THREAD_FPR0
fst.d $f6, \tmp, THREAD_FPR6 - THREAD_FPR0
fst.d $f7, \tmp, THREAD_FPR7 - THREAD_FPR0
fst.d $f8, \tmp, THREAD_FPR8 - THREAD_FPR0
fst.d $f9, \tmp, THREAD_FPR9 - THREAD_FPR0
fst.d $f10, \tmp, THREAD_FPR10 - THREAD_FPR0
fst.d $f11, \tmp, THREAD_FPR11 - THREAD_FPR0
fst.d $f12, \tmp, THREAD_FPR12 - THREAD_FPR0
fst.d $f13, \tmp, THREAD_FPR13 - THREAD_FPR0
fst.d $f14, \tmp, THREAD_FPR14 - THREAD_FPR0
fst.d $f15, \tmp, THREAD_FPR15 - THREAD_FPR0
fst.d $f16, \tmp, THREAD_FPR16 - THREAD_FPR0
fst.d $f17, \tmp, THREAD_FPR17 - THREAD_FPR0
fst.d $f18, \tmp, THREAD_FPR18 - THREAD_FPR0
fst.d $f19, \tmp, THREAD_FPR19 - THREAD_FPR0
fst.d $f20, \tmp, THREAD_FPR20 - THREAD_FPR0
fst.d $f21, \tmp, THREAD_FPR21 - THREAD_FPR0
fst.d $f22, \tmp, THREAD_FPR22 - THREAD_FPR0
fst.d $f23, \tmp, THREAD_FPR23 - THREAD_FPR0
fst.d $f24, \tmp, THREAD_FPR24 - THREAD_FPR0
fst.d $f25, \tmp, THREAD_FPR25 - THREAD_FPR0
fst.d $f26, \tmp, THREAD_FPR26 - THREAD_FPR0
fst.d $f27, \tmp, THREAD_FPR27 - THREAD_FPR0
fst.d $f28, \tmp, THREAD_FPR28 - THREAD_FPR0
fst.d $f29, \tmp, THREAD_FPR29 - THREAD_FPR0
fst.d $f30, \tmp, THREAD_FPR30 - THREAD_FPR0
fst.d $f31, \tmp, THREAD_FPR31 - THREAD_FPR0
.endm
.macro fpu_restore_double thread tmp
li.w \tmp, THREAD_FPR0
PTR_ADD \tmp, \tmp, \thread
fld.d $f0, \tmp, THREAD_FPR0 - THREAD_FPR0
fld.d $f1, \tmp, THREAD_FPR1 - THREAD_FPR0
fld.d $f2, \tmp, THREAD_FPR2 - THREAD_FPR0
fld.d $f3, \tmp, THREAD_FPR3 - THREAD_FPR0
fld.d $f4, \tmp, THREAD_FPR4 - THREAD_FPR0
fld.d $f5, \tmp, THREAD_FPR5 - THREAD_FPR0
fld.d $f6, \tmp, THREAD_FPR6 - THREAD_FPR0
fld.d $f7, \tmp, THREAD_FPR7 - THREAD_FPR0
fld.d $f8, \tmp, THREAD_FPR8 - THREAD_FPR0
fld.d $f9, \tmp, THREAD_FPR9 - THREAD_FPR0
fld.d $f10, \tmp, THREAD_FPR10 - THREAD_FPR0
fld.d $f11, \tmp, THREAD_FPR11 - THREAD_FPR0
fld.d $f12, \tmp, THREAD_FPR12 - THREAD_FPR0
fld.d $f13, \tmp, THREAD_FPR13 - THREAD_FPR0
fld.d $f14, \tmp, THREAD_FPR14 - THREAD_FPR0
fld.d $f15, \tmp, THREAD_FPR15 - THREAD_FPR0
fld.d $f16, \tmp, THREAD_FPR16 - THREAD_FPR0
fld.d $f17, \tmp, THREAD_FPR17 - THREAD_FPR0
fld.d $f18, \tmp, THREAD_FPR18 - THREAD_FPR0
fld.d $f19, \tmp, THREAD_FPR19 - THREAD_FPR0
fld.d $f20, \tmp, THREAD_FPR20 - THREAD_FPR0
fld.d $f21, \tmp, THREAD_FPR21 - THREAD_FPR0
fld.d $f22, \tmp, THREAD_FPR22 - THREAD_FPR0
fld.d $f23, \tmp, THREAD_FPR23 - THREAD_FPR0
fld.d $f24, \tmp, THREAD_FPR24 - THREAD_FPR0
fld.d $f25, \tmp, THREAD_FPR25 - THREAD_FPR0
fld.d $f26, \tmp, THREAD_FPR26 - THREAD_FPR0
fld.d $f27, \tmp, THREAD_FPR27 - THREAD_FPR0
fld.d $f28, \tmp, THREAD_FPR28 - THREAD_FPR0
fld.d $f29, \tmp, THREAD_FPR29 - THREAD_FPR0
fld.d $f30, \tmp, THREAD_FPR30 - THREAD_FPR0
fld.d $f31, \tmp, THREAD_FPR31 - THREAD_FPR0
.endm
.macro not dst src
nor \dst, \src, zero
.endm
#endif /* _ASM_ASMMACRO_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Atomic operations.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H
#define ARCH_ATOMIC
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#if __SIZEOF_LONG__ == 4
#define __LL "ll.w "
#define __SC "sc.w "
#define __AMADD "amadd.w "
#define __AMAND_DB "amand_db.w "
#define __AMOR_DB "amor_db.w "
#define __AMXOR_DB "amxor_db.w "
#elif __SIZEOF_LONG__ == 8
#define __LL "ll.d "
#define __SC "sc.d "
#define __AMADD "amadd.d "
#define __AMAND_DB "amand_db.d "
#define __AMOR_DB "amor_db.d "
#define __AMXOR_DB "amxor_db.d "
#endif
#define ATOMIC_INIT(i) { (i) }
/*
* arch_atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
#define arch_atomic_read(v) READ_ONCE((v)->counter)
/*
* arch_atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
#define ATOMIC_OP(op, I, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__asm__ __volatile__( \
"am"#asm_op"_db.w" " $zero, %1, %0 \n" \
: "+ZB" (v->counter) \
: "r" (I) \
: "memory"); \
}
#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
int result; \
\
__asm__ __volatile__( \
"am"#asm_op"_db.w" " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result c_op I; \
}
#define ATOMIC_FETCH_OP(op, I, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
int result; \
\
__asm__ __volatile__( \
"am"#asm_op"_db.w" " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result; \
}
#define ATOMIC_OPS(op, I, asm_op, c_op) \
ATOMIC_OP(op, I, asm_op) \
ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
ATOMIC_FETCH_OP(op, I, asm_op)
ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, I, asm_op) \
ATOMIC_OP(op, I, asm_op) \
ATOMIC_FETCH_OP(op, I, asm_op)
ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
__asm__ __volatile__ (
"0: ll.w %[p], %[c]\n"
" beq %[p], %[u], 1f\n"
" add.w %[rc], %[p], %[a]\n"
" sc.w %[rc], %[c]\n"
" beqz %[rc], 0b\n"
" b 2f\n"
"1:\n"
__WEAK_LLSC_MB
"2:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc),
[c]"=ZB" (v->counter)
: [a]"r" (a), [u]"r" (u)
: "memory");
return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
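/*
 * Illustrative sketch (not part of this header): the classic use of
 * fetch_add_unless is the generic atomic_inc_not_zero() pattern, which
 * takes a reference only if the count has not already dropped to zero.
 * my_ref_get() is a hypothetical helper:
 *
 *	static inline bool my_ref_get(atomic_t *ref)
 *	{
 *		return arch_atomic_fetch_add_unless(ref, 1, 0) != 0;
 *	}
 *
 * The ll.w/sc.w loop above retries until either the store-conditional
 * succeeds or the observed value equals @u.
 */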
/*
* arch_atomic_sub_if_positive - conditionally subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
* The function returns the old value of @v minus @i.
*/
static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
{
int result;
int temp;
if (__builtin_constant_p(i)) {
__asm__ __volatile__(
"1: ll.w %1, %2 # atomic_sub_if_positive\n"
" addi.w %0, %1, %3 \n"
" move %1, %0 \n"
" bltz %0, 2f \n"
" sc.w %1, %2 \n"
" beqz %1, 1b \n"
"2: \n"
__WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
: "I" (-i));
} else {
__asm__ __volatile__(
"1: ll.w %1, %2 # atomic_sub_if_positive\n"
" sub.w %0, %1, %3 \n"
" move %1, %0 \n"
" bltz %0, 2f \n"
" sc.w %1, %2 \n"
" beqz %1, 1b \n"
"2: \n"
__WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
: "r" (i));
}
return result;
}
#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
/*
* arch_atomic_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*/
#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)
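/*
 * Illustrative sketch (not part of this header): a hypothetical
 * try-down primitive built on sub_if_positive, which only consumes a
 * unit when one is available:
 *
 *	static inline bool my_try_down(atomic_t *count)
 *	{
 *		return arch_atomic_sub_if_positive(1, count) >= 0;
 *	}
 *
 * When the result would go negative, the bltz skips the sc.w, so @v is
 * left unchanged and the negative result reports the failure.
 */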
#ifdef CONFIG_64BIT
#define ATOMIC64_INIT(i) { (i) }
/*
* arch_atomic64_read - read atomic variable
* @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define arch_atomic64_read(v) READ_ONCE((v)->counter)
/*
* arch_atomic64_set - set atomic variable
* @v: pointer of type atomic64_t
* @i: required value
*/
#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
#define ATOMIC64_OP(op, I, asm_op) \
static inline void arch_atomic64_##op(long i, atomic64_t *v) \
{ \
__asm__ __volatile__( \
"am"#asm_op"_db.d " " $zero, %1, %0 \n" \
: "+ZB" (v->counter) \
: "r" (I) \
: "memory"); \
}
#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
{ \
long result; \
__asm__ __volatile__( \
"am"#asm_op"_db.d " " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result c_op I; \
}
#define ATOMIC64_FETCH_OP(op, I, asm_op) \
static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
{ \
long result; \
\
__asm__ __volatile__( \
"am"#asm_op"_db.d " " %1, %2, %0 \n" \
: "+ZB" (v->counter), "=&r" (result) \
: "r" (I) \
: "memory"); \
\
return result; \
}
#define ATOMIC64_OPS(op, I, asm_op, c_op) \
ATOMIC64_OP(op, I, asm_op) \
ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
ATOMIC64_FETCH_OP(op, I, asm_op)
ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, I, asm_op) \
ATOMIC64_OP(op, I, asm_op) \
ATOMIC64_FETCH_OP(op, I, asm_op)
ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
long prev, rc;
__asm__ __volatile__ (
"0: ll.d %[p], %[c]\n"
" beq %[p], %[u], 1f\n"
" add.d %[rc], %[p], %[a]\n"
" sc.d %[rc], %[c]\n"
" beqz %[rc], 0b\n"
" b 2f\n"
"1:\n"
__WEAK_LLSC_MB
"2:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc),
[c] "=ZB" (v->counter)
: [a]"r" (a), [u]"r" (u)
: "memory");
return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/*
* arch_atomic64_sub_if_positive - conditionally subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic64_t
*
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
* The function returns the old value of @v minus @i.
*/
static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
{
long result;
long temp;
if (__builtin_constant_p(i)) {
__asm__ __volatile__(
"1: ll.d %1, %2 # atomic64_sub_if_positive \n"
" addi.d %0, %1, %3 \n"
" move %1, %0 \n"
" bltz %0, 2f \n"
" sc.d %1, %2 \n"
" beqz %1, 1b \n"
"2: \n"
__WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
: "I" (-i));
} else {
__asm__ __volatile__(
"1: ll.d %1, %2 # atomic64_sub_if_positive \n"
" sub.d %0, %1, %3 \n"
" move %1, %0 \n"
" bltz %0, 2f \n"
" sc.d %1, %2 \n"
" beqz %1, 1b \n"
"2: \n"
__WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
: "r" (i));
}
return result;
}
#define arch_atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
/*
* arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t
*/
#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
#endif /* CONFIG_64BIT */
#endif /* _ASM_ATOMIC_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
#define __sync() __asm__ __volatile__("dbar 0" : : : "memory")
#define fast_wmb() __sync()
#define fast_rmb() __sync()
#define fast_mb() __sync()
#define fast_iob() __sync()
#define wbflush() __sync()
#define wmb() fast_wmb()
#define rmb() fast_rmb()
#define mb() fast_mb()
#define iob() fast_iob()
#define __smp_mb() __asm__ __volatile__("dbar 0" : : : "memory")
#define __smp_rmb() __asm__ __volatile__("dbar 0" : : : "memory")
#define __smp_wmb() __asm__ __volatile__("dbar 0" : : : "memory")
#ifdef CONFIG_SMP
#define __WEAK_LLSC_MB " dbar 0 \n"
#else
#define __WEAK_LLSC_MB " \n"
#endif
#define __smp_mb__before_atomic() barrier()
#define __smp_mb__after_atomic() barrier()
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index
* @size: number of elements in array
*
* Returns:
* 0 - (@index < @size)
*/
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long index,
unsigned long size)
{
unsigned long mask;
__asm__ __volatile__(
"sltu %0, %1, %2\n\t"
#if (__SIZEOF_LONG__ == 4)
"sub.w %0, $zero, %0\n\t"
#elif (__SIZEOF_LONG__ == 8)
"sub.d %0, $zero, %0\n\t"
#endif
: "=r" (mask)
: "r" (index), "r" (size)
:);
return mask;
}
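/*
 * Illustrative sketch (not part of this header): the mask is AND-ed
 * into an already bounds-checked index so that a mispredicted branch
 * cannot speculatively index out of bounds:
 *
 *	if (idx < ARRAY_SIZE(table)) {
 *		idx &= array_index_mask_nospec(idx, ARRAY_SIZE(table));
 *		val = table[idx];	// idx forced to 0 under misspeculation
 *	}
 *
 * Callers normally reach this through the generic array_index_nospec()
 * wrapper rather than applying the mask by hand.
 */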
#define __smp_load_acquire(p) \
({ \
union { typeof(*p) __val; char __c[1]; } __u; \
unsigned long __tmp = 0; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
*(__u8 *)__u.__c = *(volatile __u8 *)p; \
__smp_mb(); \
break; \
case 2: \
*(__u16 *)__u.__c = *(volatile __u16 *)p; \
__smp_mb(); \
break; \
case 4: \
__asm__ __volatile__( \
"amor_db.w %[val], %[tmp], %[mem] \n" \
: [val] "=&r" (*(__u32 *)__u.__c) \
: [mem] "ZB" (*(u32 *) p), [tmp] "r" (__tmp) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__( \
"amor_db.d %[val], %[tmp], %[mem] \n" \
: [val] "=&r" (*(__u64 *)__u.__c) \
: [mem] "ZB" (*(u64 *) p), [tmp] "r" (__tmp) \
: "memory"); \
break; \
} \
(typeof(*p))__u.__val; \
})
#define __smp_store_release(p, v) \
do { \
union { typeof(*p) __val; char __c[1]; } __u = \
{ .__val = (__force typeof(*p)) (v) }; \
unsigned long __tmp; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
__smp_mb(); \
*(volatile __u8 *)p = *(__u8 *)__u.__c; \
break; \
case 2: \
__smp_mb(); \
*(volatile __u16 *)p = *(__u16 *)__u.__c; \
break; \
case 4: \
__asm__ __volatile__( \
"amswap_db.w %[tmp], %[val], %[mem] \n" \
: [mem] "+ZB" (*(u32 *)p), [tmp] "=&r" (__tmp) \
: [val] "r" (*(__u32 *)__u.__c) \
: ); \
break; \
case 8: \
__asm__ __volatile__( \
"amswap_db.d %[tmp], %[val], %[mem] \n" \
: [mem] "+ZB" (*(u64 *)p), [tmp] "=&r" (__tmp) \
: [val] "r" (*(__u64 *)__u.__c) \
: ); \
break; \
} \
} while (0)
#define __smp_store_mb(p, v) \
do { \
union { typeof(p) __val; char __c[1]; } __u = \
{ .__val = (__force typeof(p)) (v) }; \
unsigned long __tmp; \
switch (sizeof(p)) { \
case 1: \
*(volatile __u8 *)&p = *(__u8 *)__u.__c; \
__smp_mb(); \
break; \
case 2: \
*(volatile __u16 *)&p = *(__u16 *)__u.__c; \
__smp_mb(); \
break; \
case 4: \
__asm__ __volatile__( \
"amswap_db.w %[tmp], %[val], %[mem] \n" \
: [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp) \
: [val] "r" (*(__u32 *)__u.__c) \
: ); \
break; \
case 8: \
__asm__ __volatile__( \
"amswap_db.d %[tmp], %[val], %[mem] \n" \
: [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp) \
: [val] "r" (*(__u64 *)__u.__c) \
: ); \
break; \
} \
} while (0)
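/*
 * Illustrative sketch (not part of this header): a minimal one-writer/
 * one-reader pairing of the helpers above; "data" and "ready" are
 * hypothetical shared variables:
 *
 *	// writer
 *	data = 42;
 *	__smp_store_release(&ready, 1);
 *
 *	// reader
 *	if (__smp_load_acquire(&ready))
 *		use(data);	// guaranteed to observe data == 42
 *
 * The 4- and 8-byte cases get their ordering from the amswap_db/amor_db
 * forms; the 1- and 2-byte cases fall back to an explicit dbar.
 */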
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#include <linux/compiler.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <asm/barrier.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/builtin-ffs.h>
#include <asm-generic/bitops/builtin-fls.h>
#include <asm-generic/bitops/builtin-__ffs.h>
#include <asm-generic/bitops/builtin-__fls.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#endif /* _ASM_BITOPS_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __LOONGARCH_ASM_BITREV_H__
#define __LOONGARCH_ASM_BITREV_H__
#include <linux/swab.h>
static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
{
u32 ret;
asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab32(x)));
return ret;
}
static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
{
u16 ret;
asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab16(x)));
return ret;
}
static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
{
u8 ret;
asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(x));
return ret;
}
#endif /* __LOONGARCH_ASM_BITREV_H__ */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_BOOTINFO_H
#define _ASM_BOOTINFO_H
#include <linux/types.h>
#include <asm/setup.h>
const char *get_system_type(void);
extern void init_environ(void);
extern void memblock_init(void);
extern void platform_init(void);
extern void plat_swiotlb_setup(void);
extern int __init init_numa_memory(void);
struct loongson_board_info {
int bios_size;
const char *bios_vendor;
const char *bios_version;
const char *bios_release_date;
const char *board_name;
const char *board_vendor;
};
struct loongson_system_configuration {
int nr_cpus;
int nr_nodes;
int boot_cpu_id;
int cores_per_node;
int cores_per_package;
unsigned long cores_io_master;
const char *cpuname;
};
extern u64 efi_system_table;
extern unsigned long fw_arg0, fw_arg1, fw_arg2;
extern struct loongson_board_info b_info;
extern struct loongson_system_configuration loongson_sysconf;
#endif /* _ASM_BOOTINFO_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_BRANCH_H
#define _ASM_BRANCH_H
#include <asm/ptrace.h>
static inline unsigned long exception_era(struct pt_regs *regs)
{
return regs->csr_era;
}
static inline void compute_return_era(struct pt_regs *regs)
{
regs->csr_era += 4;
}
#endif /* _ASM_BRANCH_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BUG_H
#define __ASM_BUG_H
#include <linux/compiler.h>
#ifdef CONFIG_BUG
#include <asm/break.h>
static inline void __noreturn BUG(void)
{
__asm__ __volatile__("break %0" : : "i" (BRK_BUG));
unreachable();
}
#define HAVE_ARCH_BUG
#endif
#include <asm-generic/bug.h>
#endif /* __ASM_BUG_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H
#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_CACHE_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H
#include <linux/mm.h>
#include <asm/cpu-features.h>
#include <asm/cacheops.h>
extern void local_flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_range local_flush_icache_range
#define flush_icache_user_range local_flush_icache_range
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define flush_icache_page(vma, page) do { } while (0)
#define flush_icache_user_page(vma, page, addr, len) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define cache_op(op, addr) \
__asm__ __volatile__( \
" cacop %0, %1 \n" \
: \
: "i" (op), "ZC" (*(unsigned char *)(addr)))
static inline void flush_icache_line_indexed(unsigned long addr)
{
cache_op(Index_Invalidate_I, addr);
}
static inline void flush_dcache_line_indexed(unsigned long addr)
{
cache_op(Index_Writeback_Inv_D, addr);
}
static inline void flush_vcache_line_indexed(unsigned long addr)
{
cache_op(Index_Writeback_Inv_V, addr);
}
static inline void flush_scache_line_indexed(unsigned long addr)
{
cache_op(Index_Writeback_Inv_S, addr);
}
static inline void flush_icache_line(unsigned long addr)
{
cache_op(Hit_Invalidate_I, addr);
}
static inline void flush_dcache_line(unsigned long addr)
{
cache_op(Hit_Writeback_Inv_D, addr);
}
static inline void flush_vcache_line(unsigned long addr)
{
cache_op(Hit_Writeback_Inv_V, addr);
}
static inline void flush_scache_line(unsigned long addr)
{
cache_op(Hit_Writeback_Inv_S, addr);
}
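/*
 * Illustrative sketch (not part of this header): a hypothetical
 * hit-type writeback-invalidate over an address range would walk it at
 * the probed D-cache line size:
 *
 *	static void my_wb_inv_dcache_range(unsigned long start, unsigned long end)
 *	{
 *		unsigned long lsize = cpu_dcache_line_size();
 *		unsigned long addr;
 *
 *		for (addr = start & ~(lsize - 1); addr < end; addr += lsize)
 *			flush_dcache_line(addr);	// Hit_Writeback_Inv_D
 *	}
 */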
#include <asm-generic/cacheflush.h>
#endif /* _ASM_CACHEFLUSH_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Cache operations for the cache instruction.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_CACHEOPS_H
#define __ASM_CACHEOPS_H
/*
 * Most cache ops are split into a 2-bit field identifying the cache, and a
 * 3-bit field identifying the cache operation.
*/
#define CacheOp_Cache 0x03
#define CacheOp_Op 0x1c
#define Cache_I 0x00
#define Cache_D 0x01
#define Cache_V 0x02
#define Cache_S 0x03
#define Index_Invalidate 0x08
#define Index_Writeback_Inv 0x08
#define Hit_Invalidate 0x10
#define Hit_Writeback_Inv 0x10
#define CacheOp_User_Defined 0x18
#define Index_Invalidate_I (Cache_I | Index_Invalidate)
#define Index_Writeback_Inv_D (Cache_D | Index_Writeback_Inv)
#define Index_Writeback_Inv_V (Cache_V | Index_Writeback_Inv)
#define Index_Writeback_Inv_S (Cache_S | Index_Writeback_Inv)
#define Hit_Invalidate_I (Cache_I | Hit_Invalidate)
#define Hit_Writeback_Inv_D (Cache_D | Hit_Writeback_Inv)
#define Hit_Writeback_Inv_V (Cache_V | Hit_Writeback_Inv)
#define Hit_Writeback_Inv_S (Cache_S | Hit_Writeback_Inv)
#endif /* __ASM_CACHEOPS_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Author: Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_CLOCKSOURCE_H
#define __ASM_CLOCKSOURCE_H
#include <asm/vdso/clocksource.h>
#endif /* __ASM_CLOCKSOURCE_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
#include <linux/bits.h>
#include <linux/build_bug.h>
#include <asm/barrier.h>
#define __xchg_asm(amswap_db, m, val) \
({ \
__typeof(val) __ret; \
\
__asm__ __volatile__ ( \
" "amswap_db" %1, %z2, %0 \n" \
: "+ZB" (*m), "=&r" (__ret) \
: "Jr" (val) \
: "memory"); \
\
__ret; \
})
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
unsigned int size)
{
unsigned int shift;
u32 old32, mask, temp;
volatile u32 *ptr32;
/* Mask value to the correct size. */
mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
val &= mask;
/*
* Calculate a shift & mask that correspond to the value we wish to
 * exchange within the naturally aligned 4 byte integer that includes
* it.
*/
shift = (unsigned long)ptr & 0x3;
shift *= BITS_PER_BYTE;
mask <<= shift;
/*
* Calculate a pointer to the naturally aligned 4 byte integer that
* includes our byte of interest, and load its value.
*/
ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
asm volatile (
"1: ll.w %0, %3 \n"
" andn %1, %0, %z4 \n"
" or %1, %1, %z5 \n"
" sc.w %1, %2 \n"
" beqz %1, 1b \n"
: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
: "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
: "memory");
return (old32 & mask) >> shift;
}
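/*
 * Worked example (illustrative): exchanging one byte at an address
 * ending in ...2 gives shift = 2 * BITS_PER_BYTE = 16 and
 * mask = 0x00ff0000, so the ll.w/sc.w loop rewrites only bits [23:16]
 * of the containing aligned word, and the old byte comes back as
 * (old32 & 0x00ff0000) >> 16.
 */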
static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
int size)
{
switch (size) {
case 1:
case 2:
return __xchg_small(ptr, x, size);
case 4:
return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
case 8:
return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);
default:
BUILD_BUG();
}
return 0;
}
#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __res; \
\
__res = (__typeof__(*(ptr))) \
__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
\
__res; \
})
#define __cmpxchg_asm(ld, st, m, old, new) \
({ \
__typeof(old) __ret; \
\
__asm__ __volatile__( \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" move $t0, %z4 \n" \
" " st " $t0, %1 \n" \
" beqz $t0, 1b \n" \
"2: \n" \
__WEAK_LLSC_MB \
: "=&r" (__ret), "=ZB"(*m) \
: "ZB"(*m), "Jr" (old), "Jr" (new) \
: "t0", "memory"); \
\
__ret; \
})
static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
unsigned int new, unsigned int size)
{
unsigned int shift;
u32 old32, mask, temp;
volatile u32 *ptr32;
/* Mask inputs to the correct size. */
mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
old &= mask;
new &= mask;
/*
* Calculate a shift & mask that correspond to the value we wish to
* compare & exchange within the naturally aligned 4 byte integer
* that includes it.
*/
shift = (unsigned long)ptr & 0x3;
shift *= BITS_PER_BYTE;
old <<= shift;
new <<= shift;
mask <<= shift;
/*
* Calculate a pointer to the naturally aligned 4 byte integer that
* includes our byte of interest, and load its value.
*/
ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
asm volatile (
"1: ll.w %0, %3 \n"
" and %1, %0, %z4 \n"
" bne %1, %z5, 2f \n"
" andn %1, %0, %z4 \n"
" or %1, %1, %z6 \n"
" sc.w %1, %2 \n"
" beqz %1, 1b \n"
" b 3f \n"
"2: \n"
__WEAK_LLSC_MB
"3: \n"
: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
: "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
: "memory");
return (old32 & mask) >> shift;
}
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
{
switch (size) {
case 1:
case 2:
return __cmpxchg_small(ptr, old, new, size);
case 4:
return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
(u32)old, new);
case 8:
return __cmpxchg_asm("ll.d", "sc.d", (volatile u64 *)ptr,
(u64)old, new);
default:
BUILD_BUG();
}
return 0;
}
#define arch_cmpxchg_local(ptr, old, new) \
((__typeof__(*(ptr))) \
__cmpxchg((ptr), \
(unsigned long)(__typeof__(*(ptr)))(old), \
(unsigned long)(__typeof__(*(ptr)))(new), \
sizeof(*(ptr))))
#define arch_cmpxchg(ptr, old, new) \
({ \
__typeof__(*(ptr)) __res; \
\
__res = arch_cmpxchg_local((ptr), (old), (new)); \
\
__res; \
})
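/*
 * Illustrative sketch (not part of this header): the usual lock-free
 * read-modify-write loop built on arch_cmpxchg; my_set_flag() is a
 * hypothetical helper:
 *
 *	static void my_set_flag(unsigned int *word, unsigned int flag)
 *	{
 *		unsigned int old, new;
 *
 *		do {
 *			old = READ_ONCE(*word);
 *			new = old | flag;
 *		} while (arch_cmpxchg(word, old, new) != old);
 *	}
 *
 * The loop retries whenever another CPU changed *word between the read
 * and the compare-and-exchange.
 */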
#ifdef CONFIG_64BIT
#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
arch_cmpxchg_local((ptr), (o), (n)); \
})
#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
arch_cmpxchg((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#endif
#endif /* __ASM_CMPXCHG_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 2003, 2004 Ralf Baechle
* Copyright (C) 2004 Maciej W. Rozycki
*/
#ifndef __ASM_CPU_FEATURES_H
#define __ASM_CPU_FEATURES_H
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#define cpu_opt(opt) (cpu_data[0].options & (opt))
#define cpu_has(feat) (cpu_data[0].options & BIT_ULL(feat))
#define cpu_has_loongarch (cpu_has_loongarch32 | cpu_has_loongarch64)
#define cpu_has_loongarch32 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT)
#define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
#define cpu_icache_line_size() cpu_data[0].icache.linesz
#define cpu_dcache_line_size() cpu_data[0].dcache.linesz
#define cpu_vcache_line_size() cpu_data[0].vcache.linesz
#define cpu_scache_line_size() cpu_data[0].scache.linesz
#ifdef CONFIG_32BIT
# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
# define cpu_vabits 31
# define cpu_pabits 31
#endif
#ifdef CONFIG_64BIT
# define cpu_has_64bits 1
# define cpu_vabits cpu_data[0].vabits
# define cpu_pabits cpu_data[0].pabits
# define __NEED_ADDRBITS_PROBE
#endif
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
* This is true for all known LoongArch systems.
*/
#define cpu_has_cpucfg cpu_opt(LOONGARCH_CPU_CPUCFG)
#define cpu_has_lam cpu_opt(LOONGARCH_CPU_LAM)
#define cpu_has_ual cpu_opt(LOONGARCH_CPU_UAL)
#define cpu_has_fpu cpu_opt(LOONGARCH_CPU_FPU)
#define cpu_has_lsx cpu_opt(LOONGARCH_CPU_LSX)
#define cpu_has_lasx cpu_opt(LOONGARCH_CPU_LASX)
#define cpu_has_complex cpu_opt(LOONGARCH_CPU_COMPLEX)
#define cpu_has_crypto cpu_opt(LOONGARCH_CPU_CRYPTO)
#define cpu_has_lvz cpu_opt(LOONGARCH_CPU_LVZ)
#define cpu_has_lbt_x86 cpu_opt(LOONGARCH_CPU_LBT_X86)
#define cpu_has_lbt_arm cpu_opt(LOONGARCH_CPU_LBT_ARM)
#define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS)
#define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips)
#define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR)
#define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB)
#define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH)
#define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT)
#define cpu_has_csripi cpu_opt(LOONGARCH_CPU_CSRIPI)
#define cpu_has_extioi cpu_opt(LOONGARCH_CPU_EXTIOI)
#define cpu_has_prefetch cpu_opt(LOONGARCH_CPU_PREFETCH)
#define cpu_has_pmp cpu_opt(LOONGARCH_CPU_PMP)
#define cpu_has_perf cpu_opt(LOONGARCH_CPU_PMP)
#define cpu_has_scalefreq cpu_opt(LOONGARCH_CPU_SCALEFREQ)
#define cpu_has_flatmode cpu_opt(LOONGARCH_CPU_FLATMODE)
#define cpu_has_eiodecode cpu_opt(LOONGARCH_CPU_EIODECODE)
#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID)
#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR)
#endif /* __ASM_CPU_FEATURES_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_CPU_INFO_H
#define __ASM_CPU_INFO_H
#include <linux/cache.h>
#include <linux/types.h>
#include <asm/loongarch.h>
/*
* Descriptor for a cache
*/
struct cache_desc {
unsigned int waysize; /* Bytes per way */
 unsigned short sets; /* Number of sets (lines per way) */
unsigned char ways; /* Number of ways */
unsigned char linesz; /* Size of line in bytes */
unsigned char waybit; /* Bits to select in a cache set */
unsigned char flags; /* Flags describing cache properties */
};
struct cpuinfo_loongarch {
u64 asid_cache;
unsigned long asid_mask;
/*
* Capability and feature descriptor structure for LoongArch CPU
*/
unsigned long long options;
unsigned int processor_id;
unsigned int fpu_vers;
unsigned int fpu_csr0;
unsigned int fpu_mask;
unsigned int cputype;
int isa_level;
int tlbsize;
int tlbsizemtlb;
int tlbsizestlbsets;
int tlbsizestlbways;
struct cache_desc icache; /* Primary I-cache */
struct cache_desc dcache; /* Primary D or combined I/D cache */
struct cache_desc vcache; /* Victim cache, between pcache and scache */
struct cache_desc scache; /* Secondary cache */
struct cache_desc tcache; /* Tertiary/split secondary cache */
int core; /* physical core number in package */
int package;/* physical package number */
int vabits; /* Virtual Address size in bits */
int pabits; /* Physical Address size in bits */
unsigned int ksave_mask; /* Usable KSave mask. */
 unsigned int watch_dreg_count; /* Number of data breakpoints */
 unsigned int watch_ireg_count; /* Number of instruction breakpoints */
unsigned int watch_reg_use_cnt; /* min(NUM_WATCH_REGS, watch_dreg_count + watch_ireg_count), Usable by ptrace */
} __aligned(SMP_CACHE_BYTES);
extern struct cpuinfo_loongarch cpu_data[];
#define boot_cpu_data cpu_data[0]
#define current_cpu_data cpu_data[smp_processor_id()]
#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
extern void cpu_probe(void);
extern const char *__cpu_family[];
extern const char *__cpu_full_name[];
#define cpu_family_string() __cpu_family[raw_smp_processor_id()]
#define cpu_full_name_string() __cpu_full_name[raw_smp_processor_id()]
struct seq_file;
struct notifier_block;
extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
#define proc_cpuinfo_notifier(fn, pri) \
({ \
static struct notifier_block fn##_nb = { \
.notifier_call = fn, \
.priority = pri \
}; \
\
register_proc_cpuinfo_notifier(&fn##_nb); \
})
struct proc_cpuinfo_notifier_args {
struct seq_file *m;
unsigned long n;
};
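/*
 * Illustrative sketch (not part of this header): a hypothetical driver
 * appending a line to /proc/cpuinfo through the notifier chain above:
 *
 *	static int my_cpuinfo_show(struct notifier_block *nb,
 *				   unsigned long val, void *data)
 *	{
 *		struct proc_cpuinfo_notifier_args *args = data;
 *
 *		seq_printf(args->m, "my feature\t\t: yes\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	// at init time:
 *	proc_cpuinfo_notifier(my_cpuinfo_show, 0);
 */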
static inline bool cpus_are_siblings(int cpua, int cpub)
{
struct cpuinfo_loongarch *infoa = &cpu_data[cpua];
struct cpuinfo_loongarch *infob = &cpu_data[cpub];
if (infoa->package != infob->package)
return false;
if (infoa->core != infob->core)
return false;
return true;
}
static inline unsigned long cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo)
{
return cpuinfo->asid_mask;
}
static inline void set_cpu_asid_mask(struct cpuinfo_loongarch *cpuinfo,
unsigned long asid_mask)
{
cpuinfo->asid_mask = asid_mask;
}
#endif /* __ASM_CPU_INFO_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* cpu.h: Values of the PRID register used to match up
* various LoongArch CPU types.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_CPU_H
#define _ASM_CPU_H
/*
* As described in LoongArch specs from Loongson Technology, the PRID register
* (CPUCFG.00) has the following layout:
*
* +---------------+----------------+------------+--------------------+
* | Reserved | Company ID | Series ID | Product ID |
* +---------------+----------------+------------+--------------------+
* 31 24 23 16 15 12 11 0
*/
/*
* Assigned Company values for bits 23:16 of the PRID register.
*/
#define PRID_COMP_MASK 0xff0000
#define PRID_COMP_LOONGSON 0x140000
/*
 * Assigned Series ID values for bits 15:12 of the PRID register. To
 * identify a certain CPU type exactly, additional registers may
 * eventually need to be examined.
*/
#define PRID_SERIES_MASK 0xf000
#define PRID_SERIES_LA132 0x8000 /* Loongson 32bit */
#define PRID_SERIES_LA264 0xa000 /* Loongson 64bit, 2-issue */
#define PRID_SERIES_LA364 0xb000 /* Loongson 64bit, 3-issue */
#define PRID_SERIES_LA464 0xc000 /* Loongson 64bit, 4-issue */
#define PRID_SERIES_LA664 0xd000 /* Loongson 64bit, 6-issue */
/*
* Particular Product ID values for bits 11:0 of the PRID register.
*/
#define PRID_PRODUCT_MASK 0x0fff
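/*
 * Worked example (illustrative): a hypothetical PRID value 0x0014c010
 * decodes with the masks above as:
 *
 *	(prid & PRID_COMP_MASK)   == PRID_COMP_LOONGSON	// 0x140000
 *	(prid & PRID_SERIES_MASK) == PRID_SERIES_LA464	// 0xc000
 *	(prid & PRID_PRODUCT_MASK)			// 0x010
 */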
#if !defined(__ASSEMBLY__)
enum cpu_type_enum {
CPU_UNKNOWN,
CPU_LOONGSON32,
CPU_LOONGSON64,
CPU_LAST
};
#endif /* !__ASSEMBLY__ */
/*
* ISA Level encodings
 */
#define LOONGARCH_CPU_ISA_LA32R 0x00000001
#define LOONGARCH_CPU_ISA_LA32S 0x00000002
#define LOONGARCH_CPU_ISA_LA64 0x00000004
#define LOONGARCH_CPU_ISA_32BIT (LOONGARCH_CPU_ISA_LA32R | LOONGARCH_CPU_ISA_LA32S)
#define LOONGARCH_CPU_ISA_64BIT LOONGARCH_CPU_ISA_LA64
/*
* CPU Option encodings
*/
#define CPU_FEATURE_CPUCFG 0 /* CPU has CPUCFG */
#define CPU_FEATURE_LAM 1 /* CPU has Atomic instructions */
#define CPU_FEATURE_UAL 2 /* CPU supports unaligned access */
#define CPU_FEATURE_FPU 3 /* CPU has FPU */
#define CPU_FEATURE_LSX 4 /* CPU has LSX (128-bit SIMD) */
#define CPU_FEATURE_LASX 5 /* CPU has LASX (256-bit SIMD) */
#define CPU_FEATURE_COMPLEX 6 /* CPU has Complex instructions */
#define CPU_FEATURE_CRYPTO 7 /* CPU has Crypto instructions */
#define CPU_FEATURE_LVZ 8 /* CPU has Virtualization extension */
#define CPU_FEATURE_LBT_X86 9 /* CPU has X86 Binary Translation */
#define CPU_FEATURE_LBT_ARM 10 /* CPU has ARM Binary Translation */
#define CPU_FEATURE_LBT_MIPS 11 /* CPU has MIPS Binary Translation */
#define CPU_FEATURE_TLB 12 /* CPU has TLB */
#define CPU_FEATURE_CSR 13 /* CPU has CSR */
#define CPU_FEATURE_WATCH 14 /* CPU has watchpoint registers */
#define CPU_FEATURE_VINT 15 /* CPU has vectored interrupts */
#define CPU_FEATURE_CSRIPI 16 /* CPU has CSR-IPI */
#define CPU_FEATURE_EXTIOI 17 /* CPU has EXT-IOI */
#define CPU_FEATURE_PREFETCH 18 /* CPU has prefetch instructions */
#define CPU_FEATURE_PMP 19 /* CPU has performance counters */
#define CPU_FEATURE_SCALEFREQ 20 /* CPU supports cpufreq scaling */
#define CPU_FEATURE_FLATMODE 21 /* CPU has flat mode */
#define CPU_FEATURE_EIODECODE 22 /* CPU has EXTIOI interrupt pin decode mode */
#define CPU_FEATURE_GUESTID 23 /* CPU has GuestID feature */
#define CPU_FEATURE_HYPERVISOR 24 /* CPU has hypervisor (running in VM) */
#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
#define LOONGARCH_CPU_UAL BIT_ULL(CPU_FEATURE_UAL)
#define LOONGARCH_CPU_FPU BIT_ULL(CPU_FEATURE_FPU)
#define LOONGARCH_CPU_LSX BIT_ULL(CPU_FEATURE_LSX)
#define LOONGARCH_CPU_LASX BIT_ULL(CPU_FEATURE_LASX)
#define LOONGARCH_CPU_COMPLEX BIT_ULL(CPU_FEATURE_COMPLEX)
#define LOONGARCH_CPU_CRYPTO BIT_ULL(CPU_FEATURE_CRYPTO)
#define LOONGARCH_CPU_LVZ BIT_ULL(CPU_FEATURE_LVZ)
#define LOONGARCH_CPU_LBT_X86 BIT_ULL(CPU_FEATURE_LBT_X86)
#define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM)
#define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS)
#define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB)
#define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR)
#define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH)
#define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT)
#define LOONGARCH_CPU_CSRIPI BIT_ULL(CPU_FEATURE_CSRIPI)
#define LOONGARCH_CPU_EXTIOI BIT_ULL(CPU_FEATURE_EXTIOI)
#define LOONGARCH_CPU_PREFETCH BIT_ULL(CPU_FEATURE_PREFETCH)
#define LOONGARCH_CPU_PMP BIT_ULL(CPU_FEATURE_PMP)
#define LOONGARCH_CPU_SCALEFREQ BIT_ULL(CPU_FEATURE_SCALEFREQ)
#define LOONGARCH_CPU_FLATMODE BIT_ULL(CPU_FEATURE_FLATMODE)
#define LOONGARCH_CPU_EIODECODE BIT_ULL(CPU_FEATURE_EIODECODE)
#define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID)
#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR)
#endif /* _ASM_CPU_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* CPU feature definitions for module loading, used by
* module_cpu_feature_match(), see uapi/asm/hwcap.h for LoongArch CPU features.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H
#include <uapi/asm/hwcap.h>
#include <asm/elf.h>
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
static inline bool cpu_have_feature(unsigned int num)
{
return elf_hwcap & (1UL << num);
}
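/*
 * Illustrative sketch (not part of this header): assuming a HWCAP bit
 * such as HWCAP_LOONGARCH_FPU in uapi/asm/hwcap.h, a module would test
 * for it with:
 *
 *	if (cpu_have_feature(cpu_feature(LOONGARCH_FPU)))
 *		...
 *
 * cpu_feature() is just ilog2() of the HWCAP_ mask, so both helpers
 * stay consistent with whatever elf_hwcap advertises.
 */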
#endif /* __ASM_CPUFEATURE_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_DELAY_H
#define _ASM_DELAY_H
#include <linux/param.h>
extern void __delay(unsigned long cycles);
extern void __ndelay(unsigned long ns);
extern void __udelay(unsigned long us);
#define ndelay(ns) __ndelay(ns)
#define udelay(us) __udelay(us)
/* make sure "usecs *= ..." in udelay does not overflow. */
#if HZ >= 1000
#define MAX_UDELAY_MS 1
#elif HZ <= 200
#define MAX_UDELAY_MS 5
#else
#define MAX_UDELAY_MS (1000 / HZ)
#endif
#endif /* _ASM_DELAY_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _LOONGARCH_DMA_DIRECT_H
#define _LOONGARCH_DMA_DIRECT_H
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
#endif /* _LOONGARCH_DMA_DIRECT_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_DMA_H
#define __ASM_DMA_H
#define MAX_DMA_ADDRESS PAGE_OFFSET
#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
extern int isa_dma_bridge_buggy;
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_DMI_H
#define _ASM_DMI_H
#include <linux/io.h>
#include <linux/memblock.h>
#define dmi_early_remap(x, l) dmi_remap(x, l)
#define dmi_early_unmap(x, l) dmi_unmap(x)
#define dmi_alloc(l) memblock_alloc(l, PAGE_SIZE)
static inline void *dmi_remap(u64 phys_addr, unsigned long size)
{
return ((void *)TO_CACHE(phys_addr));
}
static inline void dmi_unmap(void *addr)
{
}
#endif /* _ASM_DMI_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_LOONGARCH_EFI_H
#define _ASM_LOONGARCH_EFI_H
#include <linux/efi.h>
void __init loongson_efi_init(void);
void __init efi_runtime_init(void);
#define ARCH_EFI_IRQ_FLAGS_MASK 0x00000004 /* Bit 2: CSR.CRMD.IE */
#define arch_efi_call_virt_setup()
#define arch_efi_call_virt_teardown()
#define EFI_ALLOC_ALIGN SZ_64K
#define EFI_RT_VIRTUAL_OFFSET CSR_DMW0_BASE
static inline struct screen_info *alloc_screen_info(void)
{
return &screen_info;
}
static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{
}
static inline void free_screen_info(struct screen_info *si)
{
}
static inline unsigned long efi_get_max_fdt_addr(unsigned long image_addr)
{
return ULONG_MAX;
}
static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
return ULONG_MAX;
}
extern void *early_memremap_ro(resource_size_t phys_addr, unsigned long size);
#endif /* _ASM_LOONGARCH_EFI_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_ELF_H
#define _ASM_ELF_H
#include <linux/auxvec.h>
#include <linux/fs.h>
#include <uapi/linux/elf.h>
#include <asm/current.h>
#include <asm/vdso.h>
/* The ABI of a file. */
#define EF_LOONGARCH_ABI_LP64_SOFT_FLOAT 0x1
#define EF_LOONGARCH_ABI_LP64_SINGLE_FLOAT 0x2
#define EF_LOONGARCH_ABI_LP64_DOUBLE_FLOAT 0x3
#define EF_LOONGARCH_ABI_ILP32_SOFT_FLOAT 0x5
#define EF_LOONGARCH_ABI_ILP32_SINGLE_FLOAT 0x6
#define EF_LOONGARCH_ABI_ILP32_DOUBLE_FLOAT 0x7
/* LoongArch relocation types used by the dynamic linker */
#define R_LARCH_NONE 0
#define R_LARCH_32 1
#define R_LARCH_64 2
#define R_LARCH_RELATIVE 3
#define R_LARCH_COPY 4
#define R_LARCH_JUMP_SLOT 5
#define R_LARCH_TLS_DTPMOD32 6
#define R_LARCH_TLS_DTPMOD64 7
#define R_LARCH_TLS_DTPREL32 8
#define R_LARCH_TLS_DTPREL64 9
#define R_LARCH_TLS_TPREL32 10
#define R_LARCH_TLS_TPREL64 11
#define R_LARCH_IRELATIVE 12
#define R_LARCH_MARK_LA 20
#define R_LARCH_MARK_PCREL 21
#define R_LARCH_SOP_PUSH_PCREL 22
#define R_LARCH_SOP_PUSH_ABSOLUTE 23
#define R_LARCH_SOP_PUSH_DUP 24
#define R_LARCH_SOP_PUSH_GPREL 25
#define R_LARCH_SOP_PUSH_TLS_TPREL 26
#define R_LARCH_SOP_PUSH_TLS_GOT 27
#define R_LARCH_SOP_PUSH_TLS_GD 28
#define R_LARCH_SOP_PUSH_PLT_PCREL 29
#define R_LARCH_SOP_ASSERT 30
#define R_LARCH_SOP_NOT 31
#define R_LARCH_SOP_SUB 32
#define R_LARCH_SOP_SL 33
#define R_LARCH_SOP_SR 34
#define R_LARCH_SOP_ADD 35
#define R_LARCH_SOP_AND 36
#define R_LARCH_SOP_IF_ELSE 37
#define R_LARCH_SOP_POP_32_S_10_5 38
#define R_LARCH_SOP_POP_32_U_10_12 39
#define R_LARCH_SOP_POP_32_S_10_12 40
#define R_LARCH_SOP_POP_32_S_10_16 41
#define R_LARCH_SOP_POP_32_S_10_16_S2 42
#define R_LARCH_SOP_POP_32_S_5_20 43
#define R_LARCH_SOP_POP_32_S_0_5_10_16_S2 44
#define R_LARCH_SOP_POP_32_S_0_10_10_16_S2 45
#define R_LARCH_SOP_POP_32_U 46
#define R_LARCH_ADD8 47
#define R_LARCH_ADD16 48
#define R_LARCH_ADD24 49
#define R_LARCH_ADD32 50
#define R_LARCH_ADD64 51
#define R_LARCH_SUB8 52
#define R_LARCH_SUB16 53
#define R_LARCH_SUB24 54
#define R_LARCH_SUB32 55
#define R_LARCH_SUB64 56
#define R_LARCH_GNU_VTINHERIT 57
#define R_LARCH_GNU_VTENTRY 58
#define R_LARCH_B16 64
#define R_LARCH_B21 65
#define R_LARCH_B26 66
#define R_LARCH_ABS_HI20 67
#define R_LARCH_ABS_LO12 68
#define R_LARCH_ABS64_LO20 69
#define R_LARCH_ABS64_HI12 70
#define R_LARCH_PCALA_HI20 71
#define R_LARCH_PCALA_LO12 72
#define R_LARCH_PCALA64_LO20 73
#define R_LARCH_PCALA64_HI12 74
#define R_LARCH_GOT_PC_HI20 75
#define R_LARCH_GOT_PC_LO12 76
#define R_LARCH_GOT64_PC_LO20 77
#define R_LARCH_GOT64_PC_HI12 78
#define R_LARCH_GOT_HI20 79
#define R_LARCH_GOT_LO12 80
#define R_LARCH_GOT64_LO20 81
#define R_LARCH_GOT64_HI12 82
#define R_LARCH_TLS_LE_HI20 83
#define R_LARCH_TLS_LE_LO12 84
#define R_LARCH_TLS_LE64_LO20 85
#define R_LARCH_TLS_LE64_HI12 86
#define R_LARCH_TLS_IE_PC_HI20 87
#define R_LARCH_TLS_IE_PC_LO12 88
#define R_LARCH_TLS_IE64_PC_LO20 89
#define R_LARCH_TLS_IE64_PC_HI12 90
#define R_LARCH_TLS_IE_HI20 91
#define R_LARCH_TLS_IE_LO12 92
#define R_LARCH_TLS_IE64_LO20 93
#define R_LARCH_TLS_IE64_HI12 94
#define R_LARCH_TLS_LD_PC_HI20 95
#define R_LARCH_TLS_LD_HI20 96
#define R_LARCH_TLS_GD_PC_HI20 97
#define R_LARCH_TLS_GD_HI20 98
#define R_LARCH_32_PCREL 99
#define R_LARCH_RELAX 100
#ifndef ELF_ARCH
/* ELF register definitions */
/*
 * The general purpose register set comprises the following:
* Register Number
* GPRs 32
* ORIG_A0 1
* ERA 1
* BADVADDR 1
* CRMD 1
* PRMD 1
* EUEN 1
* ECFG 1
* ESTAT 1
* Reserved 5
*/
#define ELF_NGREG 45
/*
 * The floating point register set comprises the following:
* Register Number
* FPR 32
* FCC 1
* FCSR 1
*/
#define ELF_NFPREG 34
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
#ifdef CONFIG_32BIT
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch elf32_check_arch
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#define ELF_CORE_COPY_REGS(dest, regs) \
loongarch_dump_regs32((u32 *)&(dest), (regs));
#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch elf64_check_arch
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
#define ELF_CORE_COPY_REGS(dest, regs) \
loongarch_dump_regs64((u64 *)&(dest), (regs));
#endif /* CONFIG_64BIT */
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_LOONGARCH
#endif /* !defined(ELF_ARCH) */
#define loongarch_elf_check_machine(x) ((x)->e_machine == EM_LOONGARCH)
#define vmcore_elf32_check_arch loongarch_elf_check_machine
#define vmcore_elf64_check_arch loongarch_elf_check_machine
/*
 * Return non-zero if HDR identifies a 32-bit ELF binary.
*/
#define elf32_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
if (!loongarch_elf_check_machine(__h)) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
\
__res; \
})
/*
 * Return non-zero if HDR identifies a 64-bit ELF binary.
*/
#define elf64_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
if (!loongarch_elf_check_machine(__h)) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
__res = 0; \
\
__res; \
})
#ifdef CONFIG_32BIT
#define SET_PERSONALITY2(ex, state) \
do { \
current->thread.vdso = &vdso_info; \
\
loongarch_set_personality_fcsr(state); \
\
if (personality(current->personality) != PER_LINUX) \
set_personality(PER_LINUX); \
} while (0)
#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT
#define SET_PERSONALITY2(ex, state) \
do { \
unsigned int p; \
\
clear_thread_flag(TIF_32BIT_REGS); \
clear_thread_flag(TIF_32BIT_ADDR); \
\
current->thread.vdso = &vdso_info; \
loongarch_set_personality_fcsr(state); \
\
p = personality(current->personality); \
if (p != PER_LINUX32 && p != PER_LINUX) \
set_personality(PER_LINUX); \
} while (0)
#endif /* CONFIG_64BIT */
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
* This yields a mask that user programs can use to figure out what
* instruction set this cpu supports. This could be done in userspace,
* but it's not easy, and we've already done it here.
*/
#define ELF_HWCAP (elf_hwcap)
extern unsigned int elf_hwcap;
#include <asm/hwcap.h>
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
*/
#define ELF_PLATFORM __elf_platform
extern const char *__elf_platform;
#define ELF_PLAT_INIT(_r, load_addr) do { \
_r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \
_r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \
_r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \
_r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \
_r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \
_r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \
_r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0; \
_r->regs[29] = _r->regs[30] = _r->regs[31] = 0; \
} while (0)
/*
* This is the location that an ET_DYN program is loaded if exec'ed. Typical
* use of this is to invoke "./ld.so someprog" to test out a new version of
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long)current->mm->context.vdso); \
} while (0)
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
struct arch_elf_state {
int fp_abi;
int interp_fp_abi;
};
#define LOONGARCH_ABI_FP_ANY (0)
#define INIT_ARCH_ELF_STATE { \
.fp_abi = LOONGARCH_ABI_FP_ANY, \
.interp_fp_abi = LOONGARCH_ABI_FP_ANY, \
}
extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
bool is_interp, struct arch_elf_state *state);
extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
struct arch_elf_state *state);
extern void loongarch_set_personality_fcsr(struct arch_elf_state *state);
#endif /* _ASM_ELF_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_LOONGARCH_ENTRY_COMMON_H
#define ARCH_LOONGARCH_ENTRY_COMMON_H
#include <linux/sched.h>
#include <linux/processor.h>
static inline bool on_thread_stack(void)
{
return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1));
}
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_EXEC_H
#define _ASM_EXEC_H
extern unsigned long arch_align_stack(unsigned long sp);
#endif /* _ASM_EXEC_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* fixmap.h: compile-time virtual memory allocation
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#define NR_FIX_BTMAPS 64
enum fixed_addresses {
FIX_HOLE,
FIX_EARLYCON_MEM_BASE,
__end_of_fixed_addresses
};
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define FIXMAP_PAGE_IO PAGE_KERNEL_SUC
extern void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);
#include <asm-generic/fixmap.h>
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for the FPU register names
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_FPREGDEF_H
#define _ASM_FPREGDEF_H
#define fa0 $f0 /* argument registers, fa0/fa1 reused as fv0/fv1 for return value */
#define fa1 $f1
#define fa2 $f2
#define fa3 $f3
#define fa4 $f4
#define fa5 $f5
#define fa6 $f6
#define fa7 $f7
#define ft0 $f8 /* caller saved */
#define ft1 $f9
#define ft2 $f10
#define ft3 $f11
#define ft4 $f12
#define ft5 $f13
#define ft6 $f14
#define ft7 $f15
#define ft8 $f16
#define ft9 $f17
#define ft10 $f18
#define ft11 $f19
#define ft12 $f20
#define ft13 $f21
#define ft14 $f22
#define ft15 $f23
#define fs0 $f24 /* callee saved */
#define fs1 $f25
#define fs2 $f26
#define fs3 $f27
#define fs4 $f28
#define fs5 $f29
#define fs6 $f30
#define fs7 $f31
/*
* Current binutils expects *GPRs* at FCSR position for the FCSR
* operation instructions, so define aliases for those used.
*/
#define fcsr0 $r0
#define fcsr1 $r1
#define fcsr2 $r2
#define fcsr3 $r3
#endif /* _ASM_FPREGDEF_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Author: Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_FPU_H
#define _ASM_FPU_H
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/current.h>
#include <asm/loongarch.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
struct sigcontext;
extern void _init_fpu(unsigned int);
extern void _save_fp(struct loongarch_fpu *);
extern void _restore_fp(struct loongarch_fpu *);
/*
* Mask the FCSR Cause bits according to the Enable bits, observing
* that Unimplemented is always enabled.
*/
static inline unsigned long mask_fcsr_x(unsigned long fcsr)
{
return fcsr & ((fcsr & FPU_CSR_ALL_E) <<
(ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
}
static inline int is_fp_enabled(void)
{
return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ?
1 : 0;
}
#define enable_fpu() set_csr_euen(CSR_EUEN_FPEN)
#define disable_fpu() clear_csr_euen(CSR_EUEN_FPEN)
#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)
static inline int is_fpu_owner(void)
{
return test_thread_flag(TIF_USEDFPU);
}
static inline void __own_fpu(void)
{
enable_fpu();
set_thread_flag(TIF_USEDFPU);
KSTK_EUEN(current) |= CSR_EUEN_FPEN;
}
static inline void own_fpu_inatomic(int restore)
{
if (cpu_has_fpu && !is_fpu_owner()) {
__own_fpu();
if (restore)
_restore_fp(&current->thread.fpu);
}
}
static inline void own_fpu(int restore)
{
preempt_disable();
own_fpu_inatomic(restore);
preempt_enable();
}
static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
if (is_fpu_owner()) {
if (save)
_save_fp(&tsk->thread.fpu);
disable_fpu();
clear_tsk_thread_flag(tsk, TIF_USEDFPU);
}
KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
}
static inline void lose_fpu(int save)
{
preempt_disable();
lose_fpu_inatomic(save, current);
preempt_enable();
}
static inline void init_fpu(void)
{
unsigned int fcsr = current->thread.fpu.fcsr;
__own_fpu();
_init_fpu(fcsr);
set_used_math();
}
static inline void save_fp(struct task_struct *tsk)
{
if (cpu_has_fpu)
_save_fp(&tsk->thread.fpu);
}
static inline void restore_fp(struct task_struct *tsk)
{
if (cpu_has_fpu)
_restore_fp(&tsk->thread.fpu);
}
static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
{
if (tsk == current) {
preempt_disable();
if (is_fpu_owner())
_save_fp(&current->thread.fpu);
preempt_enable();
}
return tsk->thread.fpu.fpr;
}
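/*
 * Illustrative sketch (not part of this header): the lazy-FPU ownership
 * pattern the helpers above implement, as a hypothetical sequence:
 *
 *	own_fpu(1);	// enable the FPU, restoring task state if asked
 *	...		// run with live FPU registers
 *	lose_fpu(1);	// save the registers back and disable the FPU
 *
 * Both helpers disable preemption internally, so ownership cannot
 * migrate to another task mid-sequence.
 */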
#endif /* _ASM_FPU_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/barrier.h>
#include <asm/errno.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
__asm__ __volatile__( \
"1: ll.w %1, %4 # __futex_atomic_op\n" \
" " insn " \n" \
"2: sc.w $t0, %2 \n" \
" beqz $t0, 1b \n" \
"3: \n" \
" .section .fixup,\"ax\" \n" \
"4: li.w %0, %6 \n" \
" b 3b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" "__UA_ADDR "\t1b, 4b \n" \
" "__UA_ADDR "\t2b, 4b \n" \
" .previous \n" \
: "=r" (ret), "=&r" (oldval), \
"=ZC" (*uaddr) \
: "0" (0), "ZC" (*uaddr), "Jr" (oparg), \
"i" (-EFAULT) \
: "memory", "t0"); \
}
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
int oldval = 0, ret = 0;
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("move $t0, %z5", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("add.w $t0, %1, %z5", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("or $t0, %1, %z5", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("and $t0, %1, %z5", ret, oldval, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("xor $t0, %1, %z5", ret, oldval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable();
if (!ret)
*oval = oldval;
return ret;
}
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
{
int ret = 0;
u32 val = 0;
if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
__asm__ __volatile__(
"# futex_atomic_cmpxchg_inatomic \n"
"1: ll.w %1, %3 \n"
" bne %1, %z4, 3f \n"
" move $t0, %z5 \n"
"2: sc.w $t0, %2 \n"
" beqz $t0, 1b \n"
"3: \n"
__WEAK_LLSC_MB
" .section .fixup,\"ax\" \n"
"4: li.d %0, %6 \n"
" b 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n"
" .previous \n"
: "+r" (ret), "=&r" (val), "=ZC" (*uaddr)
: "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval),
"i" (-EFAULT)
: "memory", "t0");
*uval = val;
return ret;
}
#endif /* _ASM_FUTEX_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_HARDIRQ_H
#define _ASM_HARDIRQ_H
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/irq.h>
extern void ack_bad_irq(unsigned int irq);
#define ack_bad_irq ack_bad_irq
#define NR_IPI 2
typedef struct {
unsigned int ipi_irqs[NR_IPI];
unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
#endif /* _ASM_HARDIRQ_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H
#include <asm/page.h>
uint64_t pmd_to_entrylo(unsigned long pmd_val);
#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr,
unsigned long len)
{
unsigned long task_size = STACK_TOP;
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
if (addr & ~huge_page_mask(h))
return -EINVAL;
if (len > task_size)
return -ENOMEM;
if (task_size - len < addr)
return -EINVAL;
return 0;
}
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t clear;
pte_t pte = *ptep;
pte_val(clear) = (unsigned long)invalid_pte_table;
set_pte_at(mm, addr, ptep, clear);
return pte;
}
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte;
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
flush_tlb_page(vma, addr);
return pte;
}
#define __HAVE_ARCH_HUGE_PTE_NONE
static inline int huge_pte_none(pte_t pte)
{
unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
return !val || (val == (unsigned long)invalid_pte_table);
}
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep, pte_t pte,
int dirty)
{
int changed = !pte_same(*ptep, pte);
if (changed) {
set_pte_at(vma->vm_mm, addr, ptep, pte);
/*
* There could be some standard sized pages in there,
* get them all.
*/
flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
}
return changed;
}
#include <asm-generic/hugetlb.h>
#endif /* __ASM_HUGETLB_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_HW_IRQ_H
#define __ASM_HW_IRQ_H
#include <linux/atomic.h>
extern atomic_t irq_err_count;
/*
* interrupt-retrigger: NOP for now. This may not be appropriate for all
 * machines; we'll see ...
*/
#endif /* __ASM_HW_IRQ_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_IDLE_H
#define __ASM_IDLE_H
#include <linux/linkage.h>
extern asmlinkage void __arch_cpu_idle(void);
#endif /* __ASM_IDLE_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_INST_H
#define _ASM_INST_H
#include <linux/types.h>
#include <asm/asm.h>
#define ADDR_IMMMASK_LU52ID 0xFFF0000000000000
#define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000
#define ADDR_IMMMASK_ADDU16ID 0x00000000FFFF0000
#define ADDR_IMMSHIFT_LU52ID 52
#define ADDR_IMMSHIFT_LU32ID 32
#define ADDR_IMMSHIFT_ADDU16ID 16
#define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN)
enum reg1i20_op {
lu12iw_op = 0x0a,
lu32id_op = 0x0b,
};
enum reg1i21_op {
beqz_op = 0x10,
bnez_op = 0x11,
};
enum reg2i12_op {
addiw_op = 0x0a,
addid_op = 0x0b,
lu52id_op = 0x0c,
ldb_op = 0xa0,
ldh_op = 0xa1,
ldw_op = 0xa2,
ldd_op = 0xa3,
stb_op = 0xa4,
sth_op = 0xa5,
stw_op = 0xa6,
std_op = 0xa7,
};
enum reg2i16_op {
jirl_op = 0x13,
beq_op = 0x16,
bne_op = 0x17,
blt_op = 0x18,
bge_op = 0x19,
bltu_op = 0x1a,
bgeu_op = 0x1b,
};
struct reg0i26_format {
unsigned int immediate_h : 10;
unsigned int immediate_l : 16;
unsigned int opcode : 6;
};
struct reg1i20_format {
unsigned int rd : 5;
unsigned int immediate : 20;
unsigned int opcode : 7;
};
struct reg1i21_format {
unsigned int immediate_h : 5;
unsigned int rj : 5;
unsigned int immediate_l : 16;
unsigned int opcode : 6;
};
struct reg2i12_format {
unsigned int rd : 5;
unsigned int rj : 5;
unsigned int immediate : 12;
unsigned int opcode : 10;
};
struct reg2i16_format {
unsigned int rd : 5;
unsigned int rj : 5;
unsigned int immediate : 16;
unsigned int opcode : 6;
};
union loongarch_instruction {
unsigned int word;
struct reg0i26_format reg0i26_format;
struct reg1i20_format reg1i20_format;
struct reg1i21_format reg1i21_format;
struct reg2i12_format reg2i12_format;
struct reg2i16_format reg2i16_format;
};
#define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction)
enum loongarch_gpr {
LOONGARCH_GPR_ZERO = 0,
LOONGARCH_GPR_RA = 1,
LOONGARCH_GPR_TP = 2,
LOONGARCH_GPR_SP = 3,
LOONGARCH_GPR_A0 = 4, /* Reused as V0 for return value */
LOONGARCH_GPR_A1, /* Reused as V1 for return value */
LOONGARCH_GPR_A2,
LOONGARCH_GPR_A3,
LOONGARCH_GPR_A4,
LOONGARCH_GPR_A5,
LOONGARCH_GPR_A6,
LOONGARCH_GPR_A7,
LOONGARCH_GPR_T0 = 12,
LOONGARCH_GPR_T1,
LOONGARCH_GPR_T2,
LOONGARCH_GPR_T3,
LOONGARCH_GPR_T4,
LOONGARCH_GPR_T5,
LOONGARCH_GPR_T6,
LOONGARCH_GPR_T7,
LOONGARCH_GPR_T8,
LOONGARCH_GPR_FP = 22,
LOONGARCH_GPR_S0 = 23,
LOONGARCH_GPR_S1,
LOONGARCH_GPR_S2,
LOONGARCH_GPR_S3,
LOONGARCH_GPR_S4,
LOONGARCH_GPR_S5,
LOONGARCH_GPR_S6,
LOONGARCH_GPR_S7,
LOONGARCH_GPR_S8,
LOONGARCH_GPR_MAX
};
#define is_imm12_negative(val) is_imm_negative(val, 12)
static inline bool is_imm_negative(unsigned long val, unsigned int bit)
{
return val & (1UL << (bit - 1));
}
static inline bool is_branch_ins(union loongarch_instruction *ip)
{
return ip->reg1i21_format.opcode >= beqz_op &&
ip->reg1i21_format.opcode <= bgeu_op;
}
static inline bool is_ra_save_ins(union loongarch_instruction *ip)
{
/* st.d $ra, $sp, offset */
return ip->reg2i12_format.opcode == std_op &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_RA &&
!is_imm12_negative(ip->reg2i12_format.immediate);
}
static inline bool is_stack_alloc_ins(union loongarch_instruction *ip)
{
/* addi.d $sp, $sp, -imm */
return ip->reg2i12_format.opcode == addid_op &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_SP &&
is_imm12_negative(ip->reg2i12_format.immediate);
}
u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest);
#endif /* _ASM_INST_H */
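/*
 * Example (illustrative, not part of the patch): building the bitfield view
 * of "addi.d $sp, $sp, -16" and testing it with the predicates above. The
 * 12-bit two's-complement encoding of -16 is 0xff0, so is_imm12_negative()
 * holds and is_stack_alloc_ins() recognizes the instruction. Bitfield layout
 * is compiler-defined; the little-endian layout the kernel relies on is
 * assumed here. For the address-split macros, similarly:
 * ADDR_IMM(0x9000000012345678, LU52ID) == 0x900, i.e. the top 12 bits
 * shifted down by 52.
 */
static inline bool stack_alloc_demo(void)
{
	union loongarch_instruction insn = { 0 };

	insn.reg2i12_format.opcode    = addid_op;
	insn.reg2i12_format.rj        = LOONGARCH_GPR_SP;
	insn.reg2i12_format.rd        = LOONGARCH_GPR_SP;
	insn.reg2i12_format.immediate = 0xff0;	/* -16 as a 12-bit immediate */

	return is_stack_alloc_ins(&insn);	/* true */
}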
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_IO_H
#define _ASM_IO_H
#define ARCH_HAS_IOREMAP_WC
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/addrspace.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/string.h>
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
extern void __init early_iounmap(void __iomem *addr, unsigned long size);
#define early_memremap early_ioremap
#define early_memunmap early_iounmap
#ifdef CONFIG_ARCH_IOREMAP
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
unsigned long prot_val)
{
if (prot_val & _CACHE_CC)
return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
else
return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
}
#define ioremap(offset, size) \
ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC))
#define iounmap(addr) do { } while (0)
#endif
/*
 * On LoongArch, ioremap() has two variants: ioremap_wc() and ioremap_cache().
 * All of them map bus memory into CPU space; the mapped memory is marked
 * uncachable (_CACHE_SUC), uncachable but write-combine accelerated
 * (_CACHE_WUC), or cachable (_CACHE_CC), respectively, for CPU access.
*
* @offset: bus address of the memory
* @size: size of the resource to map
*/
#define ioremap_wc(offset, size) \
ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_WUC))
#define ioremap_cache(offset, size) \
ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
#define mmiowb() asm volatile ("dbar 0" ::: "memory")
/*
* String version of I/O memory access operations.
*/
extern void __memset_io(volatile void __iomem *dst, int c, size_t count);
extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count);
extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count);
#define memset_io(c, v, l) __memset_io((c), (v), (l))
#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))
#include <asm-generic/io.h>
#endif /* _ASM_IO_H */
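/*
 * Usage sketch (illustrative, not part of the patch): a driver maps its
 * register BAR uncached and a frame buffer write-combined. The function
 * name and addresses are hypothetical; error unwinding is elided.
 */
static int demo_map_device_sketch(phys_addr_t reg_base, phys_addr_t fb_base, size_t fb_len)
{
	void __iomem *regs = ioremap(reg_base, 0x1000);	/* _CACHE_SUC: strongly ordered */
	void __iomem *fb = ioremap_wc(fb_base, fb_len);	/* _CACHE_WUC: write-combined */

	if (!regs || !fb)
		return -ENOMEM;

	writel(1, regs);		/* uncached MMIO register write */
	memset_io(fb, 0, fb_len);	/* bulk clear via the string MMIO helper */
	return 0;
}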
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
#include <linux/irqdomain.h>
#include <linux/irqreturn.h>
#define IRQ_STACK_SIZE THREAD_SIZE
#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
DECLARE_PER_CPU(unsigned long, irq_stack);
/*
* The highest address on the IRQ stack contains a dummy frame which is
* structured as follows:
*
* top ------------
* | task sp | <- irq_stack[cpu] + IRQ_STACK_START
* ------------
* | | <- First frame of IRQ context
* ------------
*
* task sp holds a copy of the task stack pointer where the struct pt_regs
* from exception entry can be found.
*/
static inline bool on_irq_stack(int cpu, unsigned long sp)
{
unsigned long low = per_cpu(irq_stack, cpu);
unsigned long high = low + IRQ_STACK_SIZE;
return (low <= sp && sp <= high);
}
void spurious_interrupt(void);
#define NR_IRQS_LEGACY 16
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
bool arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_self);
#define MAX_IO_PICS 2
#define NR_IRQS (64 + (256 * MAX_IO_PICS))
struct acpi_vector_group {
int node;
int pci_segment;
struct irq_domain *parent;
};
extern struct acpi_vector_group pch_group[MAX_IO_PICS];
extern struct acpi_vector_group msi_group[MAX_IO_PICS];
#define CORES_PER_EIO_NODE 4
#define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */
#define LOONGSON_CPU_THSENS_VEC 14 /* CPU Thsens */
#define LOONGSON_CPU_HT0_VEC 16 /* CPU HT0 irq vector base number */
#define LOONGSON_CPU_HT1_VEC 24 /* CPU HT1 irq vector base number */
/* IRQ number definitions */
#define LOONGSON_LPC_IRQ_BASE 0
#define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15)
#define LOONGSON_CPU_IRQ_BASE 16
#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14)
#define LOONGSON_PCH_IRQ_BASE 64
#define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47)
#define LOONGSON_PCH_LAST_IRQ (LOONGSON_PCH_IRQ_BASE + 64 - 1)
#define LOONGSON_MSI_IRQ_BASE (LOONGSON_PCH_IRQ_BASE + 64)
#define LOONGSON_MSI_LAST_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1)
#define GSI_MIN_LPC_IRQ LOONGSON_LPC_IRQ_BASE
#define GSI_MAX_LPC_IRQ (LOONGSON_LPC_IRQ_BASE + 16 - 1)
#define GSI_MIN_CPU_IRQ LOONGSON_CPU_IRQ_BASE
#define GSI_MAX_CPU_IRQ (LOONGSON_CPU_IRQ_BASE + 48 - 1)
#define GSI_MIN_PCH_IRQ LOONGSON_PCH_IRQ_BASE
#define GSI_MAX_PCH_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1)
struct acpi_madt_lio_pic;
struct acpi_madt_eio_pic;
struct acpi_madt_ht_pic;
struct acpi_madt_bio_pic;
struct acpi_madt_msi_pic;
struct acpi_madt_lpc_pic;
int liointc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lio_pic *acpi_liointc);
int eiointc_acpi_init(struct irq_domain *parent,
struct acpi_madt_eio_pic *acpi_eiointc);
int htvec_acpi_init(struct irq_domain *parent,
struct acpi_madt_ht_pic *acpi_htvec);
int pch_lpc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lpc_pic *acpi_pchlpc);
int pch_msi_acpi_init(struct irq_domain *parent,
struct acpi_madt_msi_pic *acpi_pchmsi);
int pch_pic_acpi_init(struct irq_domain *parent,
struct acpi_madt_bio_pic *acpi_pchpic);
int find_pch_pic(u32 gsi);
struct fwnode_handle *get_pch_msi_handle(int pci_segment);
extern struct acpi_madt_lio_pic *acpi_liointc;
extern struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS];
extern struct acpi_madt_ht_pic *acpi_htintc;
extern struct acpi_madt_lpc_pic *acpi_pchlpc;
extern struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS];
extern struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS];
extern struct fwnode_handle *cpuintc_handle;
extern struct fwnode_handle *liointc_handle;
extern struct fwnode_handle *pch_lpc_handle;
extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev);
#include <asm-generic/irq.h>
#endif /* _ASM_IRQ_H */
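/*
 * Example (illustrative, not part of the patch): classifying a GSI into the
 * ranges defined above: LPC GSIs are 0-15, CPU GSIs 16-63, and PCH GSIs
 * 64-319 (64 pch-pic lines followed by 256 MSI vectors). The helper name is
 * hypothetical.
 */
static inline const char *gsi_range_sketch(u32 gsi)
{
	if (gsi <= GSI_MAX_LPC_IRQ)
		return "LPC";
	if (gsi >= GSI_MIN_CPU_IRQ && gsi <= GSI_MAX_CPU_IRQ)
		return "CPU";
	if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ)
		return "PCH";
	return "unknown";
}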
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_IRQ_REGS_H
#define __ASM_IRQ_REGS_H
#define ARCH_HAS_OWN_IRQ_REGS
#include <linux/thread_info.h>
static inline struct pt_regs *get_irq_regs(void)
{
return current_thread_info()->regs;
}
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
struct pt_regs *old_regs;
old_regs = get_irq_regs();
current_thread_info()->regs = new_regs;
return old_regs;
}
#endif /* __ASM_IRQ_REGS_H */
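/*
 * Usage sketch (illustrative, not part of the patch): the canonical
 * save/restore pattern around an interrupt dispatcher, so that handlers and
 * profiling code can fetch the interrupted context via get_irq_regs(). The
 * dispatcher name is hypothetical.
 */
static void demo_dispatch_irq(struct pt_regs *regs, int irq)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	generic_handle_irq(irq);	/* handlers may call get_irq_regs() */
	set_irq_regs(old_regs);		/* restore, keeping nesting correct */
}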
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_LOONGARCH_KDEBUG_H
#define _ASM_LOONGARCH_KDEBUG_H
#include <linux/notifier.h>
enum die_val {
DIE_OOPS = 1,
DIE_RI,
DIE_FP,
DIE_SIMD,
DIE_TRAP,
DIE_PAGE_FAULT,
DIE_BREAK,
DIE_SSTEPBP,
DIE_UPROBE,
DIE_UPROBE_XOL,
};
#endif /* _ASM_LOONGARCH_KDEBUG_H */
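/*
 * Example (illustrative, not part of the patch): these events are consumed
 * through the standard die notifier chain; e.g. a debugger registers a
 * callback with register_die_notifier() and the breakpoint handler reports
 * DIE_BREAK via notify_die(). The names below are hypothetical, and
 * pr_info() assumes <linux/printk.h>.
 */
static int demo_die_handler(struct notifier_block *nb, unsigned long action, void *data)
{
	if (action == DIE_BREAK)
		pr_info("breakpoint trap reported\n");
	return NOTIFY_DONE;
}

static struct notifier_block demo_die_nb = {
	.notifier_call = demo_die_handler,
};
/* register_die_notifier(&demo_die_nb) hooks it into the chain. */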
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
#define __ALIGN .align 2
#define __ALIGN_STR __stringify(__ALIGN)
#define SYM_FUNC_START(name) \
SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \
.cfi_startproc;
#define SYM_FUNC_START_NOALIGN(name) \
SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) \
.cfi_startproc;
#define SYM_FUNC_START_LOCAL(name) \
SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) \
.cfi_startproc;
#define SYM_FUNC_START_LOCAL_NOALIGN(name) \
SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) \
.cfi_startproc;
#define SYM_FUNC_START_WEAK(name) \
SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN) \
.cfi_startproc;
#define SYM_FUNC_START_WEAK_NOALIGN(name) \
SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \
.cfi_startproc;
#define SYM_FUNC_END(name) \
.cfi_endproc; \
SYM_END(name, SYM_T_FUNC)
#endif
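/*
 * Usage sketch (illustrative, not part of the patch): in a .S file these
 * macros emit an aligned (here 4-byte, from ".align 2") global symbol
 * wrapped in CFI directives, e.g.:
 *
 *	SYM_FUNC_START(demo_ret)
 *		jr	$ra
 *	SYM_FUNC_END(demo_ret)
 */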