Unverified  Commit d25fd25f  Author: openeuler-ci-bot  Committer: Gitee

!444 LoongArch: some LS7A device driver support and LoongArch bug fixes

Merge Pull Request from: @Hongchen_Zhang 
 
- add cpufreq support
- add gpio support
- add i2c support
- add spi support
- add rtc support
- add s3/s4 support
- add LS7A modesetting driver support
- add LSX/LASX support
- fix ltp prctl test error
- fix compile error when CONFIG_DEBUG_INFO_BTF is enabled
 
 
Link: https://gitee.com/openeuler/kernel/pulls/444

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
...@@ -39,6 +39,7 @@ config LOONGARCH ...@@ -39,6 +39,7 @@ config LOONGARCH
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SPARSEMEM_ENABLE select ARCH_SPARSEMEM_ENABLE
...@@ -75,6 +76,8 @@ config LOONGARCH ...@@ -75,6 +76,8 @@ config LOONGARCH
select GENERIC_SCHED_CLOCK select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL select GENERIC_TIME_VSYSCALL
select CPU_SUPPORTS_LSX
select CPU_SUPPORTS_LASX
select GPIOLIB select GPIOLIB
select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_MMAP_RND_BITS if MMU
...@@ -145,6 +148,36 @@ config CPU_HAS_PREFETCH ...@@ -145,6 +148,36 @@ config CPU_HAS_PREFETCH
bool bool
default y default y
config CPU_HAS_LSX
bool "Support for the Loongson SIMD Extension"
depends on CPU_SUPPORTS_LSX
depends on 64BIT
help
Loongson SIMD Extension (LSX) introduces 128 bit wide vector registers
and a set of SIMD instructions to operate on them. When this option
is enabled the kernel will support allocating & switching LSX
vector register contexts. If you know that your kernel will only be
running on CPUs which do not support LSX or that your userland will
not be making use of it then you may wish to say N here to reduce
the size & complexity of your kernel.
If unsure, say Y.
config CPU_HAS_LASX
bool "Support for the Loongson Advanced SIMD Extension"
depends on CPU_SUPPORTS_LASX
depends on 64BIT && CPU_HAS_LSX
help
Loongson Advanced SIMD Extension is 256 bit wide SIMD extension.
If unsure, say Y.
config CPU_SUPPORTS_LSX
bool
config CPU_SUPPORTS_LASX
bool
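As an aside on how userland can tell whether the kernel and CPU provide the LSX/LASX support enabled by the options above, the sketch below reads the ELF auxiliary vector. It is illustrative only; the fallback HWCAP_LOONGARCH_LSX/LASX values are an assumption taken from the mainline LoongArch UAPI headers, not something defined by this patch.

/* Illustrative userspace probe; the fallback HWCAP bit values below are an
 * assumption (they match the mainline <asm/hwcap.h> definitions). */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_LOONGARCH_LSX
#define HWCAP_LOONGARCH_LSX	(1 << 4)
#endif
#ifndef HWCAP_LOONGARCH_LASX
#define HWCAP_LOONGARCH_LASX	(1 << 5)
#endif

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("LSX:  %s\n", (hwcap & HWCAP_LOONGARCH_LSX)  ? "yes" : "no");
	printf("LASX: %s\n", (hwcap & HWCAP_LOONGARCH_LASX) ? "yes" : "no");
	return 0;
}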
config GENERIC_CALIBRATE_DELAY config GENERIC_CALIBRATE_DELAY
def_bool y def_bool y
...@@ -393,6 +426,13 @@ config NODES_SHIFT ...@@ -393,6 +426,13 @@ config NODES_SHIFT
default "6" default "6"
depends on NUMA depends on NUMA
config VA_BITS_40
bool "40-bits"
default y
depends on 64BIT
help
Support at least 40 bits of application virtual address space.
config FORCE_MAX_ZONEORDER config FORCE_MAX_ZONEORDER
int "Maximum zone order" int "Maximum zone order"
range 14 64 if PAGE_SIZE_64KB range 14 64 if PAGE_SIZE_64KB
...@@ -457,6 +497,9 @@ config ARCH_SPARSEMEM_ENABLE ...@@ -457,6 +497,9 @@ config ARCH_SPARSEMEM_ENABLE
or have huge holes in the physical address space for other reasons. or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa.rst> for more. See <file:Documentation/vm/numa.rst> for more.
config SYS_SUPPORTS_HUGETLBFS
def_bool y
config ARCH_ENABLE_THP_MIGRATION config ARCH_ENABLE_THP_MIGRATION
def_bool y def_bool y
depends on TRANSPARENT_HUGEPAGE depends on TRANSPARENT_HUGEPAGE
...@@ -477,8 +520,22 @@ config ARCH_MMAP_RND_BITS_MAX ...@@ -477,8 +520,22 @@ config ARCH_MMAP_RND_BITS_MAX
menu "Power management options" menu "Power management options"
config ARCH_HIBERNATION_POSSIBLE
def_bool y
config ARCH_SUSPEND_POSSIBLE
def_bool y
source "kernel/power/Kconfig"
source "drivers/acpi/Kconfig" source "drivers/acpi/Kconfig"
endmenu endmenu
menu "CPU Power Management"
source "drivers/cpufreq/Kconfig"
endmenu
source "drivers/firmware/Kconfig" source "drivers/firmware/Kconfig"
...@@ -54,6 +54,7 @@ LDFLAGS_vmlinux += -G0 -static -n -nostdlib ...@@ -54,6 +54,7 @@ LDFLAGS_vmlinux += -G0 -static -n -nostdlib
# upgrade the compiler or downgrade the assembler. # upgrade the compiler or downgrade the assembler.
ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
cflags-y += -mexplicit-relocs cflags-y += -mexplicit-relocs
KBUILD_CFLAGS_KERNEL += -mdirect-extern-access
else else
cflags-y += $(call cc-option,-mno-explicit-relocs) cflags-y += $(call cc-option,-mno-explicit-relocs)
KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
...@@ -96,9 +97,13 @@ endif ...@@ -96,9 +97,13 @@ endif
head-y := arch/loongarch/kernel/head.o head-y := arch/loongarch/kernel/head.o
core-y += arch/loongarch/ core-y += arch/loongarch/
libs-y += arch/loongarch/lib/ libs-y += arch/loongarch/lib/
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
# suspend and hibernation support
drivers-$(CONFIG_PM) += arch/loongarch/power/
ifeq ($(KBUILD_EXTMOD),) ifeq ($(KBUILD_EXTMOD),)
prepare: vdso_prepare prepare: vdso_prepare
vdso_prepare: prepare0 vdso_prepare: prepare0
......
...@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y ...@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ=y CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y CONFIG_TASKSTATS=y
...@@ -35,12 +35,18 @@ CONFIG_BPF_SYSCALL=y ...@@ -35,12 +35,18 @@ CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y CONFIG_USERFAULTFD=y
CONFIG_PERF_EVENTS=y CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set # CONFIG_COMPAT_BRK is not set
-CONFIG_HOTPLUG_CPU=y
+CONFIG_CPU_HAS_LSX=y
CONFIG_CPU_HAS_LASX=y
CONFIG_NUMA=y CONFIG_NUMA=y
CONFIG_HIBERNATION=y
CONFIG_ACPI_SPCR_TABLE=y CONFIG_ACPI_SPCR_TABLE=y
CONFIG_ACPI_DOCK=y CONFIG_ACPI_DOCK=y
CONFIG_ACPI_IPMI=m CONFIG_ACPI_IPMI=m
CONFIG_ACPI_PCI_SLOT=y CONFIG_ACPI_PCI_SLOT=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_LOONGSON3_ACPI_CPUFREQ=y
CONFIG_EFI_CAPSULE_LOADER=m CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EFI_TEST=m CONFIG_EFI_TEST=m
CONFIG_MODULES=y CONFIG_MODULES=y
...@@ -275,10 +281,17 @@ CONFIG_NET_SCH_TBF=m ...@@ -275,10 +281,17 @@ CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_NETEM=m CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_INGRESS=m CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_CLS_BASIC=m CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m CONFIG_NET_CLS_U32=m
-CONFIG_NET_CLS_CGROUP=m
+CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m CONFIG_NET_CLS_BPF=m
CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_CLS_MATCHALL=m
CONFIG_NET_CLS_ACT=y CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m CONFIG_NET_ACT_GACT=m
...@@ -464,7 +477,7 @@ CONFIG_TXGBE=m ...@@ -464,7 +477,7 @@ CONFIG_TXGBE=m
# CONFIG_NET_VENDOR_RDC is not set # CONFIG_NET_VENDOR_RDC is not set
CONFIG_8139CP=m CONFIG_8139CP=m
CONFIG_8139TOO=m CONFIG_8139TOO=m
-CONFIG_R8169=y
+CONFIG_R8169=m
# CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_RENESAS is not set
# CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SAMSUNG is not set
...@@ -561,6 +574,11 @@ CONFIG_SENSORS_LM75=m ...@@ -561,6 +574,11 @@ CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM93=m CONFIG_SENSORS_LM93=m
CONFIG_SENSORS_W83795=m CONFIG_SENSORS_W83795=m
CONFIG_SENSORS_W83627HF=m CONFIG_SENSORS_W83627HF=m
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_CORE=y
CONFIG_WATCHDOG_SYSFS=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_GPIO_WATCHDOG=m
CONFIG_RC_CORE=m CONFIG_RC_CORE=m
CONFIG_LIRC=y CONFIG_LIRC=y
CONFIG_RC_DECODERS=y CONFIG_RC_DECODERS=y
...@@ -588,6 +606,7 @@ CONFIG_DRM_AMDGPU_SI=y ...@@ -588,6 +606,7 @@ CONFIG_DRM_AMDGPU_SI=y
CONFIG_DRM_AMDGPU_CIK=y CONFIG_DRM_AMDGPU_CIK=y
CONFIG_DRM_AMDGPU_USERPTR=y CONFIG_DRM_AMDGPU_USERPTR=y
CONFIG_DRM_AST=y CONFIG_DRM_AST=y
CONFIG_DRM_LOONGSON=y
CONFIG_DRM_QXL=m CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m CONFIG_DRM_VIRTIO_GPU=m
CONFIG_FB_EFI=y CONFIG_FB_EFI=y
...@@ -649,7 +668,8 @@ CONFIG_USB_SERIAL_OPTION=m ...@@ -649,7 +668,8 @@ CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_GADGET=y CONFIG_USB_GADGET=y
CONFIG_INFINIBAND=m CONFIG_INFINIBAND=m
CONFIG_RTC_CLASS=y CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_EFI=m
CONFIG_RTC_DRV_LS2X=y
CONFIG_DMADEVICES=y CONFIG_DMADEVICES=y
CONFIG_UIO=m CONFIG_UIO=m
CONFIG_UIO_PDRV_GENIRQ=m CONFIG_UIO_PDRV_GENIRQ=m
...@@ -726,6 +746,7 @@ CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" ...@@ -726,6 +746,7 @@ CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
CONFIG_PROC_KCORE=y CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=y CONFIG_CONFIGFS_FS=y
CONFIG_HFS_FS=m CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m CONFIG_HFSPLUS_FS=m
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#ifndef _ASM_LOONGARCH_ACPI_H #ifndef _ASM_LOONGARCH_ACPI_H
#define _ASM_LOONGARCH_ACPI_H #define _ASM_LOONGARCH_ACPI_H
#include <asm/suspend.h>
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
extern int acpi_strict; extern int acpi_strict;
extern int acpi_disabled; extern int acpi_disabled;
...@@ -15,7 +15,7 @@ extern int acpi_pci_disabled; ...@@ -15,7 +15,7 @@ extern int acpi_pci_disabled;
extern int acpi_noirq; extern int acpi_noirq;
#define acpi_os_ioremap acpi_os_ioremap #define acpi_os_ioremap acpi_os_ioremap
-void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
static inline void disable_acpi(void) static inline void disable_acpi(void)
{ {
...@@ -35,4 +35,10 @@ extern struct list_head acpi_wakeup_device_list; ...@@ -35,4 +35,10 @@ extern struct list_head acpi_wakeup_device_list;
#define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT #define ACPI_TABLE_UPGRADE_MAX_PHYS ARCH_LOW_ADDRESS_LIMIT
static inline unsigned long acpi_get_wakeup_address(void)
{
return (unsigned long)loongarch_wakeup_start;
}
extern int loongarch_acpi_suspend(void);
extern int (*acpi_suspend_lowlevel)(void);
#endif /* _ASM_LOONGARCH_ACPI_H */ #endif /* _ASM_LOONGARCH_ACPI_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ALTERNATIVE_ASM_H
#define _ASM_ALTERNATIVE_ASM_H
#ifdef __ASSEMBLY__
#include <asm/asm.h>
/*
* Issue one struct alt_instr descriptor entry (need to put it into
* the section .altinstructions, see below). This entry contains
* enough information for the alternatives patching code to patch an
* instruction. See apply_alternatives().
*/
.macro altinstruction_entry orig alt feature orig_len alt_len
.long \orig - .
.long \alt - .
.short \feature
.byte \orig_len
.byte \alt_len
.endm
/*
* Define an alternative between two instructions. If @feature is
* present, early code in apply_alternatives() replaces @oldinstr with
* @newinstr. ".fill" directive takes care of proper instruction padding
* in case @newinstr is longer than @oldinstr.
*/
.macro ALTERNATIVE oldinstr, newinstr, feature
140 :
\oldinstr
141 :
.fill - (((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)) / 4, 4, 0x03400000
142 :
.pushsection .altinstructions, "a"
altinstruction_entry 140b, 143f, \feature, 142b-140b, 144f-143f
.popsection
.subsection 1
143 :
\newinstr
144 :
.previous
.endm
#define old_len (141b-140b)
#define new_len1 (144f-143f)
#define new_len2 (145f-144f)
#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
/*
* Same as ALTERNATIVE macro above but for two alternatives. If CPU
* has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
* @feature2, it replaces @oldinstr with @newinstr2.
*/
.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
140 :
\oldinstr
141 :
.fill - ((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
(alt_max_short(new_len1, new_len2) - (old_len)) / 4, 4, 0x03400000
142 :
.pushsection .altinstructions, "a"
altinstruction_entry 140b, 143f, \feature1, 142b-140b, 144f-143f, 142b-141b
altinstruction_entry 140b, 144f, \feature2, 142b-140b, 145f-144f, 142b-141b
.popsection
.subsection 1
143 :
\newinstr1
144 :
\newinstr2
145 :
.previous
.endm
#endif /* __ASSEMBLY__ */
#endif /* _ASM_ALTERNATIVE_ASM_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ALTERNATIVE_H
#define _ASM_ALTERNATIVE_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>
struct alt_instr {
s32 instr_offset; /* offset to original instruction */
s32 replace_offset; /* offset to replacement instruction */
u16 feature; /* feature bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction */
} __packed;
/*
* Debug flag that can be tested to see whether alternative
* instructions were patched in already:
*/
extern int alternatives_patched;
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
#define b_replacement(num) "664"#num
#define e_replacement(num) "665"#num
#define alt_end_marker "663"
#define alt_slen "662b-661b"
#define alt_total_slen alt_end_marker"b-661b"
#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f"
#define __OLDINSTR(oldinstr, num) \
"661:\n\t" oldinstr "\n662:\n" \
".fill -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \
"((" alt_rlen(num) ")-(" alt_slen ")) / 4, 4, 0x03400000\n"
#define OLDINSTR(oldinstr, num) \
__OLDINSTR(oldinstr, num) \
alt_end_marker ":\n"
#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
/*
* Pad the second replacement alternative with additional NOPs if it is
* additionally longer than the first replacement alternative.
*/
#define OLDINSTR_2(oldinstr, num1, num2) \
"661:\n\t" oldinstr "\n662:\n" \
".fill -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \
"(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) / 4, " \
"4, 0x03400000\n" \
alt_end_marker ":\n"
#define ALTINSTR_ENTRY(feature, num) \
" .long 661b - .\n" /* label */ \
" .long " b_replacement(num)"f - .\n" /* new instruction */ \
" .short " __stringify(feature) "\n" /* feature bit */ \
" .byte " alt_total_slen "\n" /* source len */ \
" .byte " alt_rlen(num) "\n" /* replacement len */
#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
OLDINSTR(oldinstr, 1) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \
".subsection 1\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".previous\n"
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
OLDINSTR_2(oldinstr, 1, 2) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \
".subsection 1\n" \
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".previous\n"
/*
* Alternative instructions for different CPU types or capabilities.
*
* This allows to use optimized instructions even on generic binary
* kernels.
*
* The length of oldinstr must be greater than or equal to the length of newinstr.
* It can be padded with nops as needed.
*
* For non barrier like inlines please define new variants
* without volatile and memory clobber.
*/
#define alternative(oldinstr, newinstr, feature) \
(asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory"))
#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
(asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory"))
#endif /* __ASSEMBLY__ */
#endif /* _ASM_ALTERNATIVE_H */
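To make the intended use of the C-level alternative() wrapper concrete, here is a small illustrative sketch. The feature bit CPU_FEATURE_HYPOTHETICAL is a placeholder for this example only and is not defined by this patch; both instructions are 4 bytes, so no nop padding is generated.

static inline void sync_hint(void)
{
	/* Execute a plain nop by default; CPUs advertising the placeholder
	 * feature bit get "dbar 0" patched in by apply_alternatives(). */
	alternative("nop", "dbar 0", CPU_FEATURE_HYPOTHETICAL);
}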
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_ASM_EXTABLE_H
#define __ASM_ASM_EXTABLE_H
#define EX_TYPE_NONE 0
#define EX_TYPE_FIXUP 1
#define EX_TYPE_UACCESS_ERR_ZERO 2
#ifdef __ASSEMBLY__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
.pushsection __ex_table, "a"; \
.balign 4; \
.long ((insn) - .); \
.long ((fixup) - .); \
.short (type); \
.short (data); \
.popsection;
.macro _asm_extable, insn, fixup
__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
.endm
#else /* __ASSEMBLY__ */
#include <linux/bits.h>
#include <linux/stringify.h>
#include <asm/gpr-num.h>
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
".pushsection __ex_table, \"a\"\n" \
".balign 4\n" \
".long ((" insn ") - .)\n" \
".long ((" fixup ") - .)\n" \
".short (" type ")\n" \
".short (" data ")\n" \
".popsection\n"
#define _ASM_EXTABLE(insn, fixup) \
__ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
#define EX_DATA_REG_ERR_SHIFT 0
#define EX_DATA_REG_ERR GENMASK(4, 0)
#define EX_DATA_REG_ZERO_SHIFT 5
#define EX_DATA_REG_ZERO GENMASK(9, 5)
#define EX_DATA_REG(reg, gpr) \
"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \
__DEFINE_ASM_GPR_NUMS \
__ASM_EXTABLE_RAW(#insn, #fixup, \
__stringify(EX_TYPE_UACCESS_ERR_ZERO), \
"(" \
EX_DATA_REG(ERR, err) " | " \
EX_DATA_REG(ZERO, zero) \
")")
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ASM_EXTABLE_H */
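For orientation, a schematic use of the C-side raw fixup entry is shown below. It is modeled on the uaccess conversions later in this series and is illustrative only, not literal patch code.

static inline unsigned long load_word_nofault(unsigned long *addr)
{
	unsigned long val;

	asm volatile(
	"1:	ld.d	%0, %1		\n"
	"	b	3f		\n"
	"2:	move	%0, $zero	\n"
	"3:				\n"
	_ASM_EXTABLE(1b, 2b)		/* fault at 1 resumes at label 2 */
	: "=&r" (val)
	: "m" (*addr));

	return val;
}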
...@@ -162,8 +162,10 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v) ...@@ -162,8 +162,10 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
" bltz %0, 2f \n" " bltz %0, 2f \n"
" sc.w %1, %2 \n" " sc.w %1, %2 \n"
" beqz %1, 1b \n" " beqz %1, 1b \n"
" b 3f \n"
"2: \n" "2: \n"
__WEAK_LLSC_MB __WEAK_LLSC_MB
"3: \n"
: "=&r" (result), "=&r" (temp), "+ZC" (v->counter) : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
: "I" (-i)); : "I" (-i));
} else { } else {
...@@ -174,8 +176,10 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v) ...@@ -174,8 +176,10 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
" bltz %0, 2f \n" " bltz %0, 2f \n"
" sc.w %1, %2 \n" " sc.w %1, %2 \n"
" beqz %1, 1b \n" " beqz %1, 1b \n"
" b 3f \n"
"2: \n" "2: \n"
__WEAK_LLSC_MB __WEAK_LLSC_MB
"3: \n"
: "=&r" (result), "=&r" (temp), "+ZC" (v->counter) : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
: "r" (i)); : "r" (i));
} }
...@@ -323,8 +327,10 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v) ...@@ -323,8 +327,10 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
" bltz %0, 2f \n" " bltz %0, 2f \n"
" sc.d %1, %2 \n" " sc.d %1, %2 \n"
" beqz %1, 1b \n" " beqz %1, 1b \n"
" b 3f \n"
"2: \n" "2: \n"
__WEAK_LLSC_MB __WEAK_LLSC_MB
"3: \n"
: "=&r" (result), "=&r" (temp), "+ZC" (v->counter) : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
: "I" (-i)); : "I" (-i));
} else { } else {
...@@ -335,8 +341,10 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v) ...@@ -335,8 +341,10 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
" bltz %0, 2f \n" " bltz %0, 2f \n"
" sc.d %1, %2 \n" " sc.d %1, %2 \n"
" beqz %1, 1b \n" " beqz %1, 1b \n"
" b 3f \n"
"2: \n" "2: \n"
__WEAK_LLSC_MB __WEAK_LLSC_MB
"3: \n"
: "=&r" (result), "=&r" (temp), "+ZC" (v->counter) : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
: "r" (i)); : "r" (i));
} }
......
...@@ -33,6 +33,10 @@ struct loongson_system_configuration { ...@@ -33,6 +33,10 @@ struct loongson_system_configuration {
int cores_per_package; int cores_per_package;
unsigned long cores_io_master; unsigned long cores_io_master;
const char *cpuname; const char *cpuname;
u64 suspend_addr;
u64 gpe0_ena_reg;
u8 pcie_wake_enabled;
u8 is_soc_cpu;
}; };
extern u64 efi_system_table; extern u64 efi_system_table;
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_BUGS_H
#define _ASM_BUGS_H
#include <asm/cpu.h>
#include <asm/cpu-info.h>
extern void check_bugs(void);
#endif /* _ASM_BUGS_H */
...@@ -9,8 +9,11 @@ ...@@ -9,8 +9,11 @@
#include <asm/cpu-features.h> #include <asm/cpu-features.h>
#include <asm/cacheops.h> #include <asm/cacheops.h>
-extern void local_flush_icache_range(unsigned long start, unsigned long end);
+void local_flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_line_hit(unsigned long addr);
asmlinkage void cpu_flush_caches(void);
#define invalid_cache_line_hit(addr) flush_cache_line_hit(addr)
#define flush_icache_range local_flush_icache_range #define flush_icache_range local_flush_icache_range
#define flush_icache_user_range local_flush_icache_range #define flush_icache_user_range local_flush_icache_range
...@@ -35,46 +38,26 @@ extern void local_flush_icache_range(unsigned long start, unsigned long end); ...@@ -35,46 +38,26 @@ extern void local_flush_icache_range(unsigned long start, unsigned long end);
: \ : \
: "i" (op), "ZC" (*(unsigned char *)(addr))) : "i" (op), "ZC" (*(unsigned char *)(addr)))
-static inline void flush_icache_line_indexed(unsigned long addr)
-{
-	cache_op(Index_Invalidate_I, addr);
-}
-static inline void flush_dcache_line_indexed(unsigned long addr)
-{
-	cache_op(Index_Writeback_Inv_D, addr);
-}
-static inline void flush_vcache_line_indexed(unsigned long addr)
-{
-	cache_op(Index_Writeback_Inv_V, addr);
-}
-static inline void flush_scache_line_indexed(unsigned long addr)
-{
-	cache_op(Index_Writeback_Inv_S, addr);
-}
-static inline void flush_icache_line(unsigned long addr)
-{
-	cache_op(Hit_Invalidate_I, addr);
-}
-static inline void flush_dcache_line(unsigned long addr)
-{
-	cache_op(Hit_Writeback_Inv_D, addr);
-}
-static inline void flush_vcache_line(unsigned long addr)
-{
-	cache_op(Hit_Writeback_Inv_V, addr);
-}
-static inline void flush_scache_line(unsigned long addr)
-{
-	cache_op(Hit_Writeback_Inv_S, addr);
-}
+static inline bool cache_present(struct cache_desc *cdesc)
+{
+	return cdesc->flags & CACHE_PRESENT;
+}
+
+static inline bool cache_private(struct cache_desc *cdesc)
+{
+	return cdesc->flags & CACHE_PRIVATE;
+}
+
+static inline bool cache_inclusive(struct cache_desc *cdesc)
+{
+	return cdesc->flags & CACHE_INCLUSIVE;
+}
+
+static inline unsigned int cpu_last_level_cache_line_size(void)
+{
+	unsigned int cache_present = current_cpu_data.cache_leaves_present;
+
+	return current_cpu_data.cache_leaves[cache_present - 1].linesz;
+}

#include <asm-generic/cacheflush.h>

#endif /* _ASM_CACHEFLUSH_H */
...@@ -8,16 +8,18 @@
#define __ASM_CACHEOPS_H

/*
- * Most cache ops are split into a 2 bit field identifying the cache, and a 3
- * bit field identifying the cache operation.
+ * Most cache ops are split into a 3 bit field identifying the cache, and a 2
+ * bit field identifying the cache operation.
 */
-#define CacheOp_Cache			0x03
-#define CacheOp_Op			0x1c
+#define CacheOp_Cache			0x07
+#define CacheOp_Op			0x18

-#define Cache_I				0x00
-#define Cache_D				0x01
-#define Cache_V				0x02
-#define Cache_S				0x03
+#define Cache_LEAF0			0x00
+#define Cache_LEAF1			0x01
+#define Cache_LEAF2			0x02
+#define Cache_LEAF3			0x03
+#define Cache_LEAF4			0x04
+#define Cache_LEAF5			0x05

#define Index_Invalidate		0x08
#define Index_Writeback_Inv		0x08
...@@ -25,13 +27,17 @@
#define Hit_Writeback_Inv		0x10
#define CacheOp_User_Defined		0x18

-#define Index_Invalidate_I		(Cache_I | Index_Invalidate)
-#define Index_Writeback_Inv_D		(Cache_D | Index_Writeback_Inv)
-#define Index_Writeback_Inv_V		(Cache_V | Index_Writeback_Inv)
-#define Index_Writeback_Inv_S		(Cache_S | Index_Writeback_Inv)
-#define Hit_Invalidate_I		(Cache_I | Hit_Invalidate)
-#define Hit_Writeback_Inv_D		(Cache_D | Hit_Writeback_Inv)
-#define Hit_Writeback_Inv_V		(Cache_V | Hit_Writeback_Inv)
-#define Hit_Writeback_Inv_S		(Cache_S | Hit_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF0	(Cache_LEAF0 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF1	(Cache_LEAF1 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF2	(Cache_LEAF2 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF3	(Cache_LEAF3 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF4	(Cache_LEAF4 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF5	(Cache_LEAF5 | Index_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF0		(Cache_LEAF0 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF1		(Cache_LEAF1 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF2		(Cache_LEAF2 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF3		(Cache_LEAF3 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF4		(Cache_LEAF4 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF5		(Cache_LEAF5 | Hit_Writeback_Inv)

#endif /* __ASM_CACHEOPS_H */
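A quick worked example using only the values defined above:

Hit_Writeback_Inv_LEAF1 = Cache_LEAF1 | Hit_Writeback_Inv = 0x01 | 0x10 = 0x11
	cache leaf index: 0x11 & CacheOp_Cache (0x07) = 1
	operation bits:   0x11 & CacheOp_Op   (0x18) = 0x10 (Hit_Writeback_Inv)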
...@@ -102,8 +102,10 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x, ...@@ -102,8 +102,10 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
" move $t0, %z4 \n" \ " move $t0, %z4 \n" \
" " st " $t0, %1 \n" \ " " st " $t0, %1 \n" \
" beqz $t0, 1b \n" \ " beqz $t0, 1b \n" \
" b 3f \n" \
"2: \n" \ "2: \n" \
__WEAK_LLSC_MB \ __WEAK_LLSC_MB \
"3: \n" \
: "=&r" (__ret), "=ZB"(*m) \ : "=&r" (__ret), "=ZB"(*m) \
: "ZB"(*m), "Jr" (old), "Jr" (new) \ : "ZB"(*m), "Jr" (old), "Jr" (new) \
: "t0", "memory"); \ : "t0", "memory"); \
......
...@@ -19,11 +19,6 @@ ...@@ -19,11 +19,6 @@
#define cpu_has_loongarch32 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT) #define cpu_has_loongarch32 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT)
#define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT) #define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
#define cpu_icache_line_size() cpu_data[0].icache.linesz
#define cpu_dcache_line_size() cpu_data[0].dcache.linesz
#define cpu_vcache_line_size() cpu_data[0].vcache.linesz
#define cpu_scache_line_size() cpu_data[0].scache.linesz
#ifdef CONFIG_32BIT #ifdef CONFIG_32BIT
# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT) # define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
# define cpu_vabits 31 # define cpu_vabits 31
......
...@@ -9,19 +9,28 @@ ...@@ -9,19 +9,28 @@
#include <linux/types.h> #include <linux/types.h>
#include <asm/loongarch.h> #include <asm/loongarch.h>
/* cache_desc->flags */
enum {
CACHE_PRESENT = (1 << 0),
CACHE_PRIVATE = (1 << 1), /* core private cache */
CACHE_INCLUSIVE = (1 << 2), /* include the lower level caches */
};
/*
 * Descriptor for a cache
 */
struct cache_desc {
-	unsigned int waysize;	/* Bytes per way */
+	unsigned char type;
+	unsigned char level;
	unsigned short sets;	/* Number of lines per set */
	unsigned char ways;	/* Number of ways */
	unsigned char linesz;	/* Size of line in bytes */
-	unsigned char waybit;	/* Bits to select in a cache set */
	unsigned char flags;	/* Flags describing cache properties */
};

+#define CACHE_LEAVES_MAX	6
+#define CACHE_LEVEL_MAX		3

struct cpuinfo_loongarch {
	u64 asid_cache;
	unsigned long asid_mask;
...@@ -40,11 +49,8 @@ struct cpuinfo_loongarch {
	int tlbsizemtlb;
	int tlbsizestlbsets;
	int tlbsizestlbways;
-	struct cache_desc icache;	/* Primary I-cache */
-	struct cache_desc dcache;	/* Primary D or combined I/D cache */
-	struct cache_desc vcache;	/* Victim cache, between pcache and scache */
-	struct cache_desc scache;	/* Secondary cache */
-	struct cache_desc tcache;	/* Tertiary/split secondary cache */
+	unsigned int cache_leaves_present;	/* number of cache_leaves[] elements */
+	struct cache_desc cache_leaves[CACHE_LEAVES_MAX];
	int core;	/* physical core number in package */
	int package;	/* physical package number */
	int vabits;	/* Virtual Address size in bits */
......
...@@ -18,6 +18,23 @@ void __init efi_runtime_init(void); ...@@ -18,6 +18,23 @@ void __init efi_runtime_init(void);
#define EFI_ALLOC_ALIGN SZ_64K #define EFI_ALLOC_ALIGN SZ_64K
#define EFI_RT_VIRTUAL_OFFSET CSR_DMW0_BASE #define EFI_RT_VIRTUAL_OFFSET CSR_DMW0_BASE
#define LINUX_EFI_INITRD_MEDIA_GUID EFI_GUID(0x5568e427, 0x68fc, 0x4f3d, 0xac, 0x74, 0xca, 0x55, 0x52, 0x31, 0xcc, 0x68)
#define LINUX_EFI_NEW_MEMMAP_GUID EFI_GUID(0x800f683f, 0xd08b, 0x423a, 0xa2, 0x93, 0x96, 0x5c, 0x3c, 0x6f, 0xe2, 0xb4)
struct linux_efi_initrd {
unsigned long base;
unsigned long size;
};
struct efi_new_memmap {
unsigned long map_size;
unsigned long desc_size;
u32 desc_ver;
unsigned long map_key;
unsigned long buff_size;
efi_memory_desc_t map[];
};
static inline struct screen_info *alloc_screen_info(void) static inline struct screen_info *alloc_screen_info(void)
{ {
return &screen_info; return &screen_info;
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_EXTABLE_H
#define _ASM_LOONGARCH_EXTABLE_H
/*
* The exception table consists of pairs of relative offsets: the first
* is the relative offset to an instruction that is allowed to fault,
* and the second is the relative offset at which the program should
* continue. No registers are modified, so it is entirely up to the
* continuation code to figure out what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
int insn, fixup;
short type, data;
};
#define ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex_entry_fixup(a, b, tmp, delta) \
do { \
(a)->fixup = (b)->fixup + (delta); \
(b)->fixup = (tmp).fixup - (delta); \
(a)->type = (b)->type; \
(b)->type = (tmp).type; \
(a)->data = (b)->data; \
(b)->data = (tmp).data; \
} while (0)
bool fixup_exception(struct pt_regs *regs);
#endif
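A sketch of how these relative entries are resolved is shown below; the helper is illustrative only (the equivalent logic lives in the fixup code, not in this header).

static inline unsigned long
ex_to_insn_addr(const struct exception_table_entry *ex)
{
	/* 'insn' stores the faulting instruction's offset relative to the
	 * address of the field itself, so the entry stays valid when the
	 * kernel image is relocated. */
	return (unsigned long)&ex->insn + ex->insn;
}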
...@@ -25,6 +25,30 @@ extern void _init_fpu(unsigned int); ...@@ -25,6 +25,30 @@ extern void _init_fpu(unsigned int);
extern void _save_fp(struct loongarch_fpu *); extern void _save_fp(struct loongarch_fpu *);
extern void _restore_fp(struct loongarch_fpu *); extern void _restore_fp(struct loongarch_fpu *);
extern void _save_lsx(struct loongarch_fpu *fpu);
extern void _restore_lsx(struct loongarch_fpu *fpu);
extern void _init_lsx_upper(void);
extern void _restore_lsx_upper(struct loongarch_fpu *fpu);
extern void _save_lasx(struct loongarch_fpu *fpu);
extern void _restore_lasx(struct loongarch_fpu *fpu);
extern void _init_lasx_upper(void);
extern void _restore_lasx_upper(struct loongarch_fpu *fpu);
static inline void enable_lsx(void);
static inline void disable_lsx(void);
static inline void save_lsx(struct task_struct *t);
static inline void restore_lsx(struct task_struct *t);
static inline void enable_lasx(void);
static inline void disable_lasx(void);
static inline void save_lasx(struct task_struct *t);
static inline void restore_lasx(struct task_struct *t);
#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ
DECLARE_PER_CPU(unsigned long, msa_count);
DECLARE_PER_CPU(unsigned long, lasx_count);
#endif
/* /*
* Mask the FCSR Cause bits according to the Enable bits, observing * Mask the FCSR Cause bits according to the Enable bits, observing
* that Unimplemented is always enabled. * that Unimplemented is always enabled.
...@@ -41,6 +65,29 @@ static inline int is_fp_enabled(void) ...@@ -41,6 +65,29 @@ static inline int is_fp_enabled(void)
1 : 0; 1 : 0;
} }
static inline int is_lsx_enabled(void)
{
if (!cpu_has_lsx)
return 0;
return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LSXEN) ?
1 : 0;
}
static inline int is_lasx_enabled(void)
{
if (!cpu_has_lasx)
return 0;
return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LASXEN) ?
1 : 0;
}
static inline int is_simd_enabled(void)
{
return is_lsx_enabled() | is_lasx_enabled();
}
#define enable_fpu() set_csr_euen(CSR_EUEN_FPEN) #define enable_fpu() set_csr_euen(CSR_EUEN_FPEN)
#define disable_fpu() clear_csr_euen(CSR_EUEN_FPEN) #define disable_fpu() clear_csr_euen(CSR_EUEN_FPEN)
...@@ -78,9 +125,22 @@ static inline void own_fpu(int restore)
static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
	if (is_fpu_owner()) {
-		if (save)
-			_save_fp(&tsk->thread.fpu);
-		disable_fpu();
+		if (is_simd_enabled()) {
+			if (save) {
+				if (is_lasx_enabled())
+					save_lasx(tsk);
+				else
+					save_lsx(tsk);
+			}
+			disable_fpu();
+			disable_lsx();
+			disable_lasx();
+			clear_tsk_thread_flag(tsk, TIF_USEDSIMD);
+		} else {
+			if (save)
+				_save_fp(&tsk->thread.fpu);
+			disable_fpu();
+		}
		clear_tsk_thread_flag(tsk, TIF_USEDFPU);
	}
	KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
...@@ -126,4 +186,147 @@ static inline union fpureg *get_fpu_regs(struct task_struct *tsk) ...@@ -126,4 +186,147 @@ static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
return tsk->thread.fpu.fpr; return tsk->thread.fpu.fpr;
} }
enum {
CTX_LSX = 1,
CTX_LASX = 2,
};
static inline int is_simd_owner(void)
{
return test_thread_flag(TIF_USEDSIMD);
}
#ifdef CONFIG_CPU_HAS_LSX
static inline void enable_lsx(void)
{
if (cpu_has_lsx)
csr_xchg32(CSR_EUEN_LSXEN, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN);
#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ
per_cpu(msa_count, raw_smp_processor_id())++;
#endif
}
static inline void disable_lsx(void)
{
if (cpu_has_lsx)
csr_xchg32(0, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN);
}
static inline void save_lsx(struct task_struct *t)
{
if (cpu_has_lsx)
_save_lsx(&t->thread.fpu);
}
static inline void restore_lsx(struct task_struct *t)
{
if (cpu_has_lsx)
_restore_lsx(&t->thread.fpu);
}
static inline void init_lsx_upper(void)
{
/*
* Check cpu_has_lsx only if it's a constant. This will allow the
* compiler to optimise out code for CPUs without LSX without adding
* an extra redundant check for CPUs with LSX.
*/
if (__builtin_constant_p(cpu_has_lsx) && !cpu_has_lsx)
return;
_init_lsx_upper();
}
static inline void restore_lsx_upper(struct task_struct *t)
{
if (cpu_has_lsx)
_restore_lsx_upper(&t->thread.fpu);
}
#else
static inline void enable_lsx(void) {}
static inline void disable_lsx(void) {}
static inline void save_lsx(struct task_struct *t) {}
static inline void restore_lsx(struct task_struct *t) {}
static inline void init_lsx_upper(void) {}
static inline void restore_lsx_upper(struct task_struct *t) {}
#endif
#ifdef CONFIG_CPU_HAS_LASX
static inline void enable_lasx(void)
{
if (cpu_has_lasx) {
csr_xchg32(CSR_EUEN_LASXEN, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN);
#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ
per_cpu(lasx_count, raw_smp_processor_id())++;
#endif
}
}
static inline void disable_lasx(void)
{
if (cpu_has_lasx)
csr_xchg32(0, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN);
}
static inline void save_lasx(struct task_struct *t)
{
if (cpu_has_lasx)
_save_lasx(&t->thread.fpu);
}
static inline void restore_lasx(struct task_struct *t)
{
if (cpu_has_lasx)
_restore_lasx(&t->thread.fpu);
}
static inline void init_lasx_upper(void)
{
if (cpu_has_lasx)
_init_lasx_upper();
}
static inline void restore_lasx_upper(struct task_struct *t)
{
if (cpu_has_lasx)
_restore_lasx_upper(&t->thread.fpu);
}
#else
static inline void enable_lasx(void) {}
static inline void disable_lasx(void) {}
static inline void save_lasx(struct task_struct *t) {}
static inline void restore_lasx(struct task_struct *t) {}
static inline void init_lasx_upper(void) {}
static inline void restore_lasx_upper(struct task_struct *t) {}
#endif
static inline int thread_lsx_context_live(void)
{
int ret = 0;
if (__builtin_constant_p(cpu_has_lsx) && !cpu_has_lsx)
goto out;
ret = test_thread_flag(TIF_LSX_CTX_LIVE) ? CTX_LSX : 0;
out:
return ret;
}
static inline int thread_lasx_context_live(void)
{
int ret = 0;
if (__builtin_constant_p(cpu_has_lasx) && !cpu_has_lasx)
goto out;
ret = test_thread_flag(TIF_LASX_CTX_LIVE) ? CTX_LASX : 0;
out:
return ret;
}
#endif /* _ASM_FPU_H */ #endif /* _ASM_FPU_H */
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include <linux/futex.h> #include <linux/futex.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/asm-extable.h>
#include <asm/barrier.h> #include <asm/barrier.h>
#include <asm/errno.h> #include <asm/errno.h>
...@@ -18,18 +19,11 @@
	"2:	sc.w	$t0, %2				\n"	\
	"	beqz	$t0, 1b				\n"	\
	"3:						\n"	\
-	"	.section .fixup,\"ax\"			\n"	\
-	"4:	li.w	%0, %6				\n"	\
-	"	b	3b				\n"	\
-	"	.previous				\n"	\
-	"	.section __ex_table,\"a\"		\n"	\
-	"	"__UA_ADDR "\t1b, 4b			\n"	\
-	"	"__UA_ADDR "\t2b, 4b			\n"	\
-	"	.previous				\n"	\
+	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0)			\
+	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0)			\
	: "=r" (ret), "=&r" (oldval),				\
	  "=ZC" (*uaddr)					\
-	: "0" (0), "ZC" (*uaddr), "Jr" (oparg),			\
-	  "i" (-EFAULT)						\
+	: "0" (0), "ZC" (*uaddr), "Jr" (oparg)			\
	: "memory", "t0");					\
}
...@@ -84,19 +78,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
	"	move	$t0, %z5				\n"
	"2:	sc.w	$t0, %2					\n"
	"	beqz	$t0, 1b					\n"
+	"	b	5f					\n"
	"3:							\n"
	__WEAK_LLSC_MB
-	"	.section .fixup,\"ax\"				\n"
-	"4:	li.d	%0, %6					\n"
-	"	b	3b					\n"
-	"	.previous					\n"
-	"	.section __ex_table,\"a\"			\n"
-	"	"__UA_ADDR "\t1b, 4b				\n"
-	"	"__UA_ADDR "\t2b, 4b				\n"
-	"	.previous					\n"
+	"5:							\n"
+	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0)
+	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0)
	: "+r" (ret), "=&r" (val), "=ZC" (*uaddr)
-	: "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval),
-	  "i" (-EFAULT)
+	: "ZC" (*uaddr), "Jr" (oldval), "Jr" (newval)
	: "memory", "t0");

	*uval = val;
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_GPR_NUM_H
#define __ASM_GPR_NUM_H
#ifdef __ASSEMBLY__
.equ .L__gpr_num_zero, 0
.irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
.equ .L__gpr_num_$r\num, \num
.endr
#else /* __ASSEMBLY__ */
#define __DEFINE_ASM_GPR_NUMS \
" .equ .L__gpr_num_zero, 0\n" \
" .irp num,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \
" .equ .L__gpr_num_$r\\num, \\num\n" \
" .endr\n" \
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GPR_NUM_H */
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
#include <linux/types.h> #include <linux/types.h>
#include <asm/asm.h> #include <asm/asm.h>
#define INSN_NOP 0x03400000
#define ADDR_IMMMASK_LU52ID 0xFFF0000000000000 #define ADDR_IMMMASK_LU52ID 0xFFF0000000000000
#define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000 #define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000
#define ADDR_IMMMASK_ADDU16ID 0x00000000FFFF0000 #define ADDR_IMMMASK_ADDU16ID 0x00000000FFFF0000
...@@ -18,14 +20,25 @@ ...@@ -18,14 +20,25 @@
#define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN) #define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN)
enum reg0i26_op {
b_op = 0x14,
bl_op = 0x15,
};
enum reg1i20_op { enum reg1i20_op {
lu12iw_op = 0x0a, lu12iw_op = 0x0a,
lu32id_op = 0x0b, lu32id_op = 0x0b,
pcaddi_op = 0x0c,
pcalau12i_op = 0x0d,
pcaddu12i_op = 0x0e,
pcaddu18i_op = 0x0f,
}; };
enum reg1i21_op { enum reg1i21_op {
beqz_op = 0x10, beqz_op = 0x10,
bnez_op = 0x11, bnez_op = 0x11,
bceqz_op = 0x12, /* bits[9:8] = 0x00 */
bcnez_op = 0x12, /* bits[9:8] = 0x01 */
}; };
enum reg2i12_op { enum reg2i12_op {
...@@ -138,6 +151,20 @@ static inline bool is_imm_negative(unsigned long val, unsigned int bit) ...@@ -138,6 +151,20 @@ static inline bool is_imm_negative(unsigned long val, unsigned int bit)
return val & (1UL << (bit - 1)); return val & (1UL << (bit - 1));
} }
static inline unsigned long sign_extend(unsigned long val, unsigned int idx)
{
if (!is_imm_negative(val, idx + 1))
return ((1UL << idx) - 1) & val;
else
return ~((1UL << idx) - 1) | val;
}
static inline bool is_pc_ins(union loongarch_instruction *ip)
{
return ip->reg1i20_format.opcode >= pcaddi_op &&
ip->reg1i20_format.opcode <= pcaddu18i_op;
}
static inline bool is_branch_ins(union loongarch_instruction *ip) static inline bool is_branch_ins(union loongarch_instruction *ip)
{ {
return ip->reg1i21_format.opcode >= beqz_op && return ip->reg1i21_format.opcode >= beqz_op &&
......
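To illustrate how the sign_extend() helper added above is meant to be used, the 21-bit conditional-branch offset can be decoded as below; this is a sketch that mirrors the recompute_jump() code added later in this patch.

static unsigned long decode_branch_target(unsigned long pc,
					  unsigned int si_h, unsigned int si_l)
{
	/* si_h:si_l is a 21-bit signed word offset; shift it into bytes and
	 * sign-extend from bit 22 before adding it to the branch PC. */
	return pc + sign_extend((si_h << 16 | si_l) << 2, 22);
}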
...@@ -17,16 +17,15 @@ static inline void arch_local_irq_enable(void)
	__asm__ __volatile__(
		"csrxchg %[val], %[mask], %[reg]\n\t"
		: [val] "+r" (flags)
-		: [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
+		: [mask] "r" (flags), [reg] "i" (LOONGARCH_CSR_CRMD)
		: "memory");
}

static inline void arch_local_irq_disable(void)
{
-	u32 flags = 0;
	__asm__ __volatile__(
-		"csrxchg %[val], %[mask], %[reg]\n\t"
-		: [val] "+r" (flags)
+		"csrxchg $zero, %[mask], %[reg]\n\t"
+		:
		: [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
		: "memory");
}
......
...@@ -224,6 +224,13 @@ static inline u32 read_cpucfg(u32 reg) ...@@ -224,6 +224,13 @@ static inline u32 read_cpucfg(u32 reg)
#define CPUCFG48_VFPU_CG BIT(2) #define CPUCFG48_VFPU_CG BIT(2)
#define CPUCFG48_RAM_CG BIT(3) #define CPUCFG48_RAM_CG BIT(3)
#define CACHE_WAYS_M GENMASK(15, 0)
#define CACHE_SETS_M GENMASK(23, 16)
#define CACHE_LSIZE_M GENMASK(30, 24)
#define CACHE_WAYS 0
#define CACHE_SETS 16
#define CACHE_LSIZE 24
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* CSR */ /* CSR */
......
...@@ -70,8 +70,6 @@ static inline void xconf_writeq(u64 val64, volatile void __iomem *addr) ...@@ -70,8 +70,6 @@ static inline void xconf_writeq(u64 val64, volatile void __iomem *addr)
#define LS7A_CHIPCFG_REG_BASE (LS7A_PCH_REG_BASE + 0x00010000) #define LS7A_CHIPCFG_REG_BASE (LS7A_PCH_REG_BASE + 0x00010000)
/* MISC reg base */ /* MISC reg base */
#define LS7A_MISC_REG_BASE (LS7A_PCH_REG_BASE + 0x00080000) #define LS7A_MISC_REG_BASE (LS7A_PCH_REG_BASE + 0x00080000)
/* ACPI regs */
#define LS7A_ACPI_REG_BASE (LS7A_MISC_REG_BASE + 0x00050000)
/* RTC regs */ /* RTC regs */
#define LS7A_RTC_REG_BASE (LS7A_MISC_REG_BASE + 0x00050100) #define LS7A_RTC_REG_BASE (LS7A_MISC_REG_BASE + 0x00050100)
...@@ -93,36 +91,6 @@ static inline void xconf_writeq(u64 val64, volatile void __iomem *addr) ...@@ -93,36 +91,6 @@ static inline void xconf_writeq(u64 val64, volatile void __iomem *addr)
#define LS7A_LPC_INT_CLR (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200c) #define LS7A_LPC_INT_CLR (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x200c)
#define LS7A_LPC_INT_POL (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2010) #define LS7A_LPC_INT_POL (volatile void *)TO_UNCACHE(LS7A_PCH_REG_BASE + 0x2010)
#define LS7A_PMCON_SOC_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x000)
#define LS7A_PMCON_RESUME_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x004)
#define LS7A_PMCON_RTC_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x008)
#define LS7A_PM1_EVT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x00c)
#define LS7A_PM1_ENA_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x010)
#define LS7A_PM1_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x014)
#define LS7A_PM1_TMR_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x018)
#define LS7A_P_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x01c)
#define LS7A_GPE0_STS_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x028)
#define LS7A_GPE0_ENA_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x02c)
#define LS7A_RST_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x030)
#define LS7A_WD_SET_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x034)
#define LS7A_WD_TIMER_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x038)
#define LS7A_THSENS_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x04c)
#define LS7A_GEN_RTC_1_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x050)
#define LS7A_GEN_RTC_2_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x054)
#define LS7A_DPM_CFG_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x400)
#define LS7A_DPM_STS_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x404)
#define LS7A_DPM_CNT_REG (volatile void *)TO_UNCACHE(LS7A_ACPI_REG_BASE + 0x408)
typedef enum {
ACPI_PCI_HOTPLUG_STATUS = 1 << 1,
ACPI_CPU_HOTPLUG_STATUS = 1 << 2,
ACPI_MEM_HOTPLUG_STATUS = 1 << 3,
ACPI_POWERBUTTON_STATUS = 1 << 8,
ACPI_RTC_WAKE_STATUS = 1 << 10,
ACPI_PCI_WAKE_STATUS = 1 << 14,
ACPI_ANY_WAKE_STATUS = 1 << 15,
} AcpiEventStatusBits;
#define HT1LO_OFFSET 0xe0000000000UL #define HT1LO_OFFSET 0xe0000000000UL
/* PCI Configuration Space Base */ /* PCI Configuration Space Base */
......
...@@ -42,7 +42,11 @@ ...@@ -42,7 +42,11 @@
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PGDIR_MASK (~(PGDIR_SIZE-1))
#ifdef CONFIG_VA_BITS_40
#define VA_BITS 40
#else
#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3)) #define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3))
#endif
#define PTRS_PER_PGD (PAGE_SIZE >> 3) #define PTRS_PER_PGD (PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3 #if CONFIG_PGTABLE_LEVELS > 3
...@@ -294,9 +298,10 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) ...@@ -294,9 +298,10 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
" or %[tmp], %[tmp], %[global] \n" " or %[tmp], %[tmp], %[global] \n"
__SC "%[tmp], %[buddy] \n" __SC "%[tmp], %[buddy] \n"
" beqz %[tmp], 1b \n" " beqz %[tmp], 1b \n"
" nop \n" " b 3f \n"
"2: \n" "2: \n"
__WEAK_LLSC_MB __WEAK_LLSC_MB
"3: \n"
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global)); : [global] "r" (page_global));
#else /* !CONFIG_SMP */ #else /* !CONFIG_SMP */
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
* SECTION_SIZE_BITS 2^N: how big each section will be * SECTION_SIZE_BITS 2^N: how big each section will be
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
*/ */
-#define SECTION_SIZE_BITS	29 /* 2^29 = Largest Huge Page Size */
+#define SECTION_SIZE_BITS	28
#define MAX_PHYSMEM_BITS 48 #define MAX_PHYSMEM_BITS 48
#ifndef CONFIG_SPARSEMEM_VMEMMAP #ifndef CONFIG_SPARSEMEM_VMEMMAP
......
...@@ -114,14 +114,6 @@ ...@@ -114,14 +114,6 @@
LONG_S zero, sp, PT_R0 LONG_S zero, sp, PT_R0
csrrd t0, LOONGARCH_CSR_PRMD csrrd t0, LOONGARCH_CSR_PRMD
LONG_S t0, sp, PT_PRMD LONG_S t0, sp, PT_PRMD
csrrd t0, LOONGARCH_CSR_CRMD
LONG_S t0, sp, PT_CRMD
csrrd t0, LOONGARCH_CSR_EUEN
LONG_S t0, sp, PT_EUEN
csrrd t0, LOONGARCH_CSR_ECFG
LONG_S t0, sp, PT_ECFG
csrrd t0, LOONGARCH_CSR_ESTAT
PTR_S t0, sp, PT_ESTAT
cfi_st ra, PT_R1, \docfi cfi_st ra, PT_R1, \docfi
cfi_st a0, PT_R4, \docfi cfi_st a0, PT_R4, \docfi
cfi_st a1, PT_R5, \docfi cfi_st a1, PT_R5, \docfi
...@@ -140,7 +132,6 @@ ...@@ -140,7 +132,6 @@
cfi_st fp, PT_R22, \docfi cfi_st fp, PT_R22, \docfi
/* Set thread_info if we're coming from user mode */ /* Set thread_info if we're coming from user mode */
csrrd t0, LOONGARCH_CSR_PRMD
andi t0, t0, 0x3 /* extract pplv bit */ andi t0, t0, 0x3 /* extract pplv bit */
beqz t0, 9f beqz t0, 9f
......
...@@ -5,8 +5,13 @@ ...@@ -5,8 +5,13 @@
#ifndef _ASM_STRING_H #ifndef _ASM_STRING_H
#define _ASM_STRING_H #define _ASM_STRING_H
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count); extern void *memset(void *__s, int __c, size_t __count);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n); extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n); extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
#endif /* _ASM_STRING_H */ #endif /* _ASM_STRING_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_LOONGARCH_SUSPEND_H
#define _ASM_LOONGARCH_SUSPEND_H
void arch_common_resume(void);
void arch_common_suspend(void);
extern void loongarch_suspend_enter(void);
extern void loongarch_wakeup_start(void);
#endif /* _ASM_LOONGARCH_SUSPEND_H */
...@@ -112,6 +112,7 @@ static inline unsigned long current_stack_pointer(void) ...@@ -112,6 +112,7 @@ static inline unsigned long current_stack_pointer(void)
#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE) #define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE)
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */ #endif /* _ASM_THREAD_INFO_H */
...@@ -13,6 +13,7 @@ extern u64 cpu_clock_freq; ...@@ -13,6 +13,7 @@ extern u64 cpu_clock_freq;
extern u64 const_clock_freq; extern u64 const_clock_freq;
extern void sync_counter(void); extern void sync_counter(void);
extern void save_counter(void);
static inline unsigned int calc_const_freq(void) static inline unsigned int calc_const_freq(void)
{ {
......
...@@ -15,7 +15,8 @@ ...@@ -15,7 +15,8 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/extable.h> #include <linux/extable.h>
#include <asm/pgtable.h>
-#include <asm-generic/extable.h>
+#include <asm/extable.h>
#include <asm/asm-extable.h>
extern u64 __ua_limit; extern u64 __ua_limit;
...@@ -202,16 +203,9 @@ do { \ ...@@ -202,16 +203,9 @@ do { \
	__asm__ __volatile__(					\
	"1:	" insn "	%1, %2			\n"	\
	"2:						\n"	\
-	"	.section .fixup,\"ax\"			\n"	\
-	"3:	li.w	%0, %3				\n"	\
-	"	move	%1, $zero			\n"	\
-	"	b	2b				\n"	\
-	"	.previous				\n"	\
-	"	.section __ex_table,\"a\"		\n"	\
-	"	"__UA_ADDR "\t1b, 3b			\n"	\
-	"	.previous				\n"	\
+	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)		\
	: "+r" (__gu_err), "=r" (__gu_tmp)			\
-	: "m" (__m(ptr)), "i" (-EFAULT));			\
+	: "m" (__m(ptr)));					\
								\
	(val) = (__typeof__(*(ptr))) __gu_tmp;			\
}
...@@ -234,15 +228,9 @@ do { \ ...@@ -234,15 +228,9 @@ do { \
	__asm__ __volatile__(					\
	"1:	" insn "	%z2, %1	# __put_user_asm\n"	\
	"2:						\n"	\
-	"	.section .fixup,\"ax\"			\n"	\
-	"3:	li.w	%0, %3				\n"	\
-	"	b	2b				\n"	\
-	"	.previous				\n"	\
-	"	.section __ex_table,\"a\"		\n"	\
-	"	" __UA_ADDR " 1b, 3b			\n"	\
-	"	.previous				\n"	\
+	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0)			\
	: "+r" (__pu_err), "=m" (__m(ptr))			\
-	: "Jr" (__pu_val), "i" (-EFAULT));			\
+	: "Jr" (__pu_val));					\
}
#define HAVE_GET_KERNEL_NOFAULT #define HAVE_GET_KERNEL_NOFAULT
......
...@@ -7,7 +7,16 @@ ...@@ -7,7 +7,16 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
-#define cpu_relax()	barrier()
+/*
+ * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
+ * tight read loop is executed, because reads take priority over writes & the
+ * hardware (incorrectly) doesn't ensure that writes will eventually occur.
+ *
+ * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
+ * flush from cpu_relax() such that any pending writes will become visible as
+ * expected.
+ */
+#define cpu_relax()	smp_mb()
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
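The rationale in the comment above is easiest to see with a schematic spin-wait loop of the kind cpu_relax() is meant to sit in; the helper below is illustrative only.

static void wait_for_flag(volatile int *flag)
{
	/* On Loongson-3, the smp_mb() inside cpu_relax() forces the SFB to
	 * drain, so a store this CPU still has pending cannot be starved by
	 * the read loop below. */
	while (!READ_ONCE(*flag))
		cpu_relax();
}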
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_CLONE3 #define __ARCH_WANT_SYS_CLONE3
......
...@@ -7,7 +7,8 @@ extra-y := head.o vmlinux.lds ...@@ -7,7 +7,8 @@ extra-y := head.o vmlinux.lds
obj-y += cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ obj-y += cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \ traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
-	elf.o legacy_boot.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o
+	elf.o legacy_boot.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
+	alternative.o platform.o
obj-$(CONFIG_ACPI) += acpi.o obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_EFI) += efi.o obj-$(CONFIG_EFI) += efi.o
......
...@@ -49,7 +49,7 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size) ...@@ -49,7 +49,7 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
early_memunmap(map, size); early_memunmap(map, size);
} }
-void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{ {
if (!memblock_is_memory(phys)) if (!memblock_is_memory(phys))
return ioremap(phys, size); return ioremap(phys, size);
...@@ -74,6 +74,30 @@ void __init acpi_boot_table_init(void) ...@@ -74,6 +74,30 @@ void __init acpi_boot_table_init(void)
} }
} }
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
u64 gpe0_ena;
if (acpi_gbl_reduced_hardware)
return 0;
if (acpi_gbl_FADT.xgpe0_block.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
goto err;
gpe0_ena = acpi_gbl_FADT.xgpe0_block.address +
acpi_gbl_FADT.gpe0_block_length / 2;
if (!gpe0_ena)
goto err;
loongson_sysconf.gpe0_ena_reg = TO_UNCACHE(gpe0_ena);
return 0;
err:
pr_err(PREFIX "Invalid BIOS FADT, disabling ACPI\n");
disable_acpi();
return -1;
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
int set_processor_mask(u32 id, u32 flags) int set_processor_mask(u32 id, u32 flags)
{ {
...@@ -165,9 +189,16 @@ static void __init acpi_process_madt(void) ...@@ -165,9 +189,16 @@ static void __init acpi_process_madt(void)
acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
acpi_parse_eio_master, MAX_IO_PICS); acpi_parse_eio_master, MAX_IO_PICS);
acpi_irq_model = ACPI_IRQ_MODEL_LPIC;
loongson_sysconf.nr_cpus = num_processors; loongson_sysconf.nr_cpus = num_processors;
} }
#ifdef CONFIG_ACPI_SLEEP
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
#else
int (*acpi_suspend_lowlevel)(void);
#endif
int __init acpi_boot_init(void) int __init acpi_boot_init(void)
{ {
/* /*
...@@ -178,6 +209,8 @@ int __init acpi_boot_init(void) ...@@ -178,6 +209,8 @@ int __init acpi_boot_init(void)
loongson_sysconf.boot_cpu_id = read_csr_cpuid(); loongson_sysconf.boot_cpu_id = read_csr_cpuid();
acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
/* /*
* Process the Multiple APIC Description Table (MADT), if present * Process the Multiple APIC Description Table (MADT), if present
*/ */
......
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
#include <asm/sections.h>
int __read_mostly alternatives_patched;
EXPORT_SYMBOL_GPL(alternatives_patched);
#define MAX_PATCH_SIZE (((u8)(-1)) / LOONGARCH_INSN_SIZE)
static int __initdata_or_module debug_alternative;
static int __init debug_alt(char *str)
{
debug_alternative = 1;
return 1;
}
__setup("debug-alternative", debug_alt);
#define DPRINTK(fmt, args...) \
do { \
if (debug_alternative) \
printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args); \
} while (0)
#define DUMP_WORDS(buf, count, fmt, args...) \
do { \
if (unlikely(debug_alternative)) { \
int _j; \
union loongarch_instruction *_buf = buf; \
\
if (!(count)) \
break; \
\
printk(KERN_DEBUG fmt, ##args); \
for (_j = 0; _j < count - 1; _j++) \
printk(KERN_CONT "<%08x> ", _buf[_j].word); \
printk(KERN_CONT "<%08x>\n", _buf[_j].word); \
} \
} while (0)
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(union loongarch_instruction *insn, int count)
{
while (count--) {
insn->word = INSN_NOP;
insn++;
}
}
/* Is the jump addr in local .altinstructions */
static inline bool in_alt_jump(unsigned long jump, void *start, void *end)
{
return jump >= (unsigned long)start && jump < (unsigned long)end;
}
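/*
 * Branches copied out of a replacement sequence are PC-relative, so once the
 * instruction lands at its final location the offset has to be recomputed;
 * jumps that stay inside the .altinstructions replacement block are left as-is.
 */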
static void __init_or_module recompute_jump(union loongarch_instruction *buf,
union loongarch_instruction *dest, union loongarch_instruction *src,
void *start, void *end)
{
unsigned int si, si_l, si_h;
unsigned long cur_pc, jump_addr, pc;
long offset;
cur_pc = (unsigned long)src;
pc = (unsigned long)dest;
si_l = src->reg0i26_format.immediate_l;
si_h = src->reg0i26_format.immediate_h;
switch (src->reg0i26_format.opcode) {
case b_op:
case bl_op:
jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 27);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
offset >>= 2;
buf->reg0i26_format.immediate_h = offset >> 16;
buf->reg0i26_format.immediate_l = offset;
return;
}
si_l = src->reg1i21_format.immediate_l;
si_h = src->reg1i21_format.immediate_h;
switch (src->reg1i21_format.opcode) {
case bceqz_op: /* bceqz_op = bcnez_op */
BUG_ON(buf->reg1i21_format.rj & BIT(4));
fallthrough;
case beqz_op:
case bnez_op:
jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 22);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
BUG_ON(offset < -SZ_4M || offset >= SZ_4M);
offset >>= 2;
buf->reg1i21_format.immediate_h = offset >> 16;
buf->reg1i21_format.immediate_l = offset;
return;
}
si = src->reg2i16_format.immediate;
switch (src->reg2i16_format.opcode) {
case beq_op:
case bne_op:
case blt_op:
case bge_op:
case bltu_op:
case bgeu_op:
jump_addr = cur_pc + sign_extend(si << 2, 17);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
BUG_ON(offset < -SZ_128K || offset >= SZ_128K);
offset >>= 2;
buf->reg2i16_format.immediate = offset;
return;
}
}
static int __init_or_module copy_alt_insns(union loongarch_instruction *buf,
union loongarch_instruction *dest, union loongarch_instruction *src, int nr)
{
int i;
for (i = 0; i < nr; i++) {
buf[i].word = src[i].word;
if (is_pc_ins(&src[i])) {
pr_err("Not support pcrel instruction at present!");
return -EINVAL;
}
if (is_branch_ins(&src[i]) &&
src[i].reg2i16_format.opcode != jirl_op) {
recompute_jump(&buf[i], &dest[i], &src[i], src, src + nr);
}
}
return 0;
}
/*
* text_poke_early - Update instructions on a live kernel at boot time
*
* When you use this code to patch more than one byte of an instruction
* you need to make sure that other CPUs cannot execute this code in parallel.
* Also no thread must be currently preempted in the middle of these
* instructions. And on the local CPU you need to be protected against NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
static void *__init_or_module text_poke_early(union loongarch_instruction *insn,
union loongarch_instruction *buf, unsigned int nr)
{
int i;
unsigned long flags;
local_irq_save(flags);
for (i = 0; i < nr; i++)
insn[i].word = buf[i].word;
local_irq_restore(flags);
wbflush();
flush_icache_range((unsigned long)insn, (unsigned long)(insn + nr));
return insn;
}
/*
* Replace instructions with better alternatives for this CPU type. This runs
* before SMP is initialized to avoid SMP problems with self modifying code.
* This implies that asymmetric systems where APs have less capabilities than
* the boot processor are not handled. Tough. Make sure you disable such
* features by hand.
*/
void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
struct alt_instr *a;
unsigned int nr_instr, nr_repl, nr_insnbuf;
union loongarch_instruction *instr, *replacement;
union loongarch_instruction insnbuf[MAX_PATCH_SIZE];
DPRINTK("alt table %px, -> %px", start, end);
/*
* The scan order should be from start to end. A later scanned
* alternative code can overwrite previously scanned alternative code.
* Some kernel functions (e.g. memcpy, memset, etc) use this order to
* patch code.
*
* So be careful if you want to change the scan order to any other
* order.
*/
for (a = start; a < end; a++) {
nr_insnbuf = 0;
instr = (void *)&a->instr_offset + a->instr_offset;
replacement = (void *)&a->replace_offset + a->replace_offset;
BUG_ON(a->instrlen > sizeof(insnbuf));
BUG_ON(a->instrlen & 0x3);
BUG_ON(a->replacementlen & 0x3);
nr_instr = a->instrlen / LOONGARCH_INSN_SIZE;
nr_repl = a->replacementlen / LOONGARCH_INSN_SIZE;
if (!cpu_has(a->feature)) {
DPRINTK("feat not exist: %d, old: (%px len: %d), repl: (%px, len: %d)",
a->feature, instr, a->instrlen,
replacement, a->replacementlen);
continue;
}
DPRINTK("feat: %d, old: (%px len: %d), repl: (%px, len: %d)",
a->feature, instr, a->instrlen,
replacement, a->replacementlen);
DUMP_WORDS(instr, nr_instr, "%px: old_insn: ", instr);
DUMP_WORDS(replacement, nr_repl, "%px: rpl_insn: ", replacement);
copy_alt_insns(insnbuf, instr, replacement, nr_repl);
nr_insnbuf = nr_repl;
if (nr_instr > nr_repl) {
add_nops(insnbuf + nr_repl, nr_instr - nr_repl);
nr_insnbuf += nr_instr - nr_repl;
}
DUMP_WORDS(insnbuf, nr_insnbuf, "%px: final_insn: ", instr);
text_poke_early(instr, insnbuf, nr_insnbuf);
}
}
void __init alternative_instructions(void)
{
apply_alternatives(__alt_instructions, __alt_instructions_end);
alternatives_patched = 1;
}
@@ -257,3 +257,15 @@ void output_smpboot_defines(void)
	BLANK();
}
#endif
#ifdef CONFIG_HIBERNATION
void output_pbe_defines(void)
{
COMMENT(" Linux struct pbe offsets. ");
OFFSET(PBE_ADDRESS, pbe, address);
OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
OFFSET(PBE_NEXT, pbe, next);
DEFINE(PBE_SIZE, sizeof(struct pbe));
BLANK();
}
#endif
@@ -5,69 +5,28 @@
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/cacheinfo.h>
#include <linux/of.h>

#include <asm/bootinfo.h>
#include <asm/cpu-info.h>

-/* Populates leaf and increments to next leaf */
-#define populate_cache(cache, leaf, c_level, c_type)		\
-do {								\
-	leaf->type = c_type;					\
-	leaf->level = c_level;					\
-	leaf->coherency_line_size = c->cache.linesz;		\
-	leaf->number_of_sets = c->cache.sets;			\
-	leaf->ways_of_associativity = c->cache.ways;		\
-	leaf->size = c->cache.linesz * c->cache.sets *		\
-		c->cache.ways;					\
-	if (leaf->level > 2)					\
-		leaf->size *= nodes_per_package;		\
-	leaf++;							\
-} while (0)
-
int init_cache_level(unsigned int cpu)
{
-	struct cpuinfo_loongarch *c = &current_cpu_data;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-	int levels = 0, leaves = 0;
-
-	/*
-	 * If Dcache is not set, we assume the cache structures
-	 * are not properly initialized.
-	 */
-	if (c->dcache.waysize)
-		levels += 1;
-	else
-		return -ENOENT;
-
-	leaves += (c->icache.waysize) ? 2 : 1;
-
-	if (c->vcache.waysize) {
-		levels++;
-		leaves++;
-	}
-
-	if (c->scache.waysize) {
-		levels++;
-		leaves++;
-	}
-
-	if (c->tcache.waysize) {
-		levels++;
-		leaves++;
-	}
-
-	this_cpu_ci->num_levels = levels;
-	this_cpu_ci->num_leaves = leaves;
	unsigned int cache_present = current_cpu_data.cache_leaves_present;

	this_cpu_ci->num_levels =
		current_cpu_data.cache_leaves[cache_present - 1].level;
	this_cpu_ci->num_leaves = cache_present;

	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					struct cacheinfo *sib_leaf)
{
-	return !((this_leaf->level == 1) || (this_leaf->level == 2));
	return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE) &&
		!(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE));
}

-static void cache_cpumap_setup(unsigned int cpu)
static void __cache_cpumap_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
@@ -85,8 +44,11 @@ static void cache_cpumap_setup(unsigned int cpu)
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

-			if (i == cpu || !sib_cpu_ci->info_list)
-				continue;/* skip if itself or no cacheinfo */
			/* skip if itself or no cacheinfo or not in one
			 * physical node. */
			if (i == cpu || !sib_cpu_ci->info_list ||
				(cpu_to_node(i) != cpu_to_node(cpu)))
				continue;
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
@@ -98,33 +60,30 @@ static void cache_cpumap_setup(unsigned int cpu)
int populate_cache_leaves(unsigned int cpu)
{
-	int level = 1, nodes_per_package = 1;
-	struct cpuinfo_loongarch *c = &current_cpu_data;
	struct cache_desc *cdesc_tmp, *cdesc = current_cpu_data.cache_leaves;
	unsigned int cache_present = current_cpu_data.cache_leaves_present;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	int i;

-	if (loongson_sysconf.nr_nodes > 1)
-		nodes_per_package = loongson_sysconf.cores_per_package
-					/ loongson_sysconf.cores_per_node;
-
-	if (c->icache.waysize) {
-		populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
-		populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST);
-	} else {
-		populate_cache(dcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
	for (i = 0; i < cache_present; i++) {
		cdesc_tmp = cdesc + i;
		this_leaf->type = cdesc_tmp->type;
		this_leaf->level = cdesc_tmp->level;
		this_leaf->coherency_line_size = cdesc_tmp->linesz;
		this_leaf->number_of_sets = cdesc_tmp->sets;
		this_leaf->ways_of_associativity = cdesc_tmp->ways;
		this_leaf->size =
			cdesc_tmp->linesz * cdesc_tmp->sets * cdesc_tmp->ways;
		this_leaf->priv = &cdesc_tmp->flags;
		this_leaf++;
	}

-	if (c->vcache.waysize)
-		populate_cache(vcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
-
-	if (c->scache.waysize)
-		populate_cache(scache, this_leaf, level++, CACHE_TYPE_UNIFIED);
-
-	if (c->tcache.waysize)
-		populate_cache(tcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
-
-	cache_cpumap_setup(cpu);
-	this_cpu_ci->cpu_map_populated = true;
	if (!of_have_populated_dt()) {
		__cache_cpumap_setup(cpu);
		this_cpu_ci->cpu_map_populated = true;
	}

	return 0;
}
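The leaves filled in above feed the generic cacheinfo core, which exports them under sysfs; a minimal userspace sketch for sanity-checking the result (the index0 attribute names are the standard cacheinfo sysfs files, not something introduced by this patch):

#include <stdio.h>

int main(void)
{
	/* Standard cacheinfo sysfs attributes populated from populate_cache_leaves() */
	const char *attrs[] = { "level", "type", "size", "ways_of_associativity" };
	char path[128], buf[64];

	for (int i = 0; i < 4; i++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cache/index0/%s", attrs[i]);
		FILE *f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", attrs[i], buf);
		fclose(f);
	}
	return 0;
}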
@@ -111,6 +111,18 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
		c->options |= LOONGARCH_CPU_FPU;
		elf_hwcap |= HWCAP_LOONGARCH_FPU;
	}
#ifdef CONFIG_CPU_HAS_LSX
if (config & CPUCFG2_LSX) {
c->options |= LOONGARCH_CPU_LSX;
elf_hwcap |= HWCAP_LOONGARCH_LSX;
}
#endif
#ifdef CONFIG_CPU_HAS_LASX
if (config & CPUCFG2_LASX) {
c->options |= LOONGARCH_CPU_LASX;
elf_hwcap |= HWCAP_LOONGARCH_LASX;
}
#endif
	if (config & CPUCFG2_COMPLEX) {
		c->options |= LOONGARCH_CPU_COMPLEX;
		elf_hwcap |= HWCAP_LOONGARCH_COMPLEX;
......
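Userspace can test for the newly advertised extensions through the ELF auxiliary vector; a minimal sketch, assuming a uapi/libc that already ships the HWCAP_LOONGARCH_* definitions:

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>	/* HWCAP_LOONGARCH_LSX / HWCAP_LOONGARCH_LASX */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("LSX:  %s\n", (hwcap & HWCAP_LOONGARCH_LSX)  ? "yes" : "no");
	printf("LASX: %s\n", (hwcap & HWCAP_LOONGARCH_LASX) ? "yes" : "no");
	return 0;
}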
@@ -20,6 +20,7 @@
#include <linux/memblock.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/initrd.h>

#include <asm/early_ioremap.h>
#include <asm/efi.h>
@@ -27,11 +28,18 @@
#include <asm/loongson.h>
#include "legacy_boot.h"

static __initdata unsigned long new_memmap = EFI_INVALID_TABLE_ADDR;
static __initdata unsigned long initrd = EFI_INVALID_TABLE_ADDR;

static unsigned long efi_nr_tables;
static unsigned long efi_config_table;

static efi_system_table_t *efi_systab;
-static efi_config_table_type_t arch_tables[] __initdata = {{},};
static efi_config_table_type_t arch_tables[] __initdata = {
	{LINUX_EFI_NEW_MEMMAP_GUID,	&new_memmap,	"NEWMEM"},
	{LINUX_EFI_INITRD_MEDIA_GUID,	&initrd,	"INITRD"},
	{},
};

static __initdata pgd_t *pgd_efi;

static int __init efimap_populate_hugepages(
@@ -184,6 +192,9 @@ static int __init set_virtual_map(void)
			(efi_memory_desc_t *)TO_PHYS((unsigned long)runtime_map));

	efi_unmap_pgt();
	if (status != EFI_SUCCESS)
		return -1;

	return 0;
}
@@ -213,6 +224,44 @@ void __init efi_runtime_init(void)
	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}
static void __init get_initrd(void)
{
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
struct linux_efi_initrd *tbl;
tbl = early_memremap(initrd, sizeof(*tbl));
if (tbl) {
phys_initrd_start = tbl->base;
phys_initrd_size = tbl->size;
early_memunmap(tbl, sizeof(*tbl));
}
}
}
static void __init init_new_memmap(void)
{
struct efi_new_memmap *tbl;
if (new_memmap == EFI_INVALID_TABLE_ADDR)
return;
tbl = early_memremap_ro(new_memmap, sizeof(*tbl));
if (tbl) {
struct efi_memory_map_data data;
data.phys_map = new_memmap + sizeof(*tbl);
data.size = tbl->map_size;
data.desc_size = tbl->desc_size;
data.desc_version = tbl->desc_ver;
if (efi_memmap_init_early(&data) < 0)
panic("Unable to map EFI memory map.\n");
early_memunmap(tbl, sizeof(*tbl));
}
}
void __init loongson_efi_init(void)
{
	int size;
@@ -237,6 +286,10 @@ void __init loongson_efi_init(void)
	efi_config_parse_tables(config_tables, efi_systab->nr_tables, arch_tables);
	early_memunmap(config_tables, efi_nr_tables * size);

	get_initrd();
	init_new_memmap();

	if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI)
		memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
}
@@ -14,13 +14,14 @@
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

	.text
	.cfi_sections	.debug_frame
	.align	5
SYM_FUNC_START(handle_syscall)
	csrrd	t0, PERCPU_BASE_KS
-	la.abs	t1, kernelsp
	la.pcrel	t1, kernelsp
	add.d	t1, t1, t0
	move	t2, sp
	ld.d	sp, t1, 0
@@ -28,19 +29,10 @@ SYM_FUNC_START(handle_syscall)
	addi.d	sp, sp, -PT_SIZE
	cfi_st	t2, PT_R3
	cfi_rel_offset	sp, PT_R3
-	st.d	zero, sp, PT_R0
	csrrd	t2, LOONGARCH_CSR_PRMD
	st.d	t2, sp, PT_PRMD
-	csrrd	t2, LOONGARCH_CSR_CRMD
-	st.d	t2, sp, PT_CRMD
-	csrrd	t2, LOONGARCH_CSR_EUEN
-	st.d	t2, sp, PT_EUEN
-	csrrd	t2, LOONGARCH_CSR_ECFG
-	st.d	t2, sp, PT_ECFG
-	csrrd	t2, LOONGARCH_CSR_ESTAT
-	st.d	t2, sp, PT_ESTAT
	cfi_st	ra, PT_R1
-	cfi_st	a0, PT_R4
	cfi_st	a0, PT_ORIG_A0
	cfi_st	a1, PT_R5
	cfi_st	a2, PT_R6
	cfi_st	a3, PT_R7
@@ -49,6 +41,7 @@ SYM_FUNC_START(handle_syscall)
	cfi_st	a6, PT_R10
	cfi_st	a7, PT_R11
	csrrd	ra, LOONGARCH_CSR_ERA
	addi.d	ra, ra, 4
	st.d	ra, sp, PT_ERA
	cfi_rel_offset	ra, PT_ERA
@@ -63,9 +56,17 @@ SYM_FUNC_START(handle_syscall)
	and	tp, tp, sp

	move	a0, sp
	move	a1, a7
	bl	do_syscall

-	RESTORE_ALL_AND_RET
	addi.w	t0, zero, __NR_rt_sigreturn
	bne	a0, t0, 1f
	RESTORE_STATIC
	RESTORE_TEMP
1:
	RESTORE_SOME
	RESTORE_SP_AND_RET
SYM_FUNC_END(handle_syscall)

SYM_CODE_START(ret_from_fork)
......
@@ -22,7 +22,8 @@ void __init init_environ(void)
{
	int efi_boot = fw_arg0;
	struct efi_memory_map_data data;
-	void *fdt_ptr = early_memremap_ro(fw_arg1, SZ_64K);
	char *cmdline;
	void *fdt_ptr;

	if (efi_bp)
		return;
@@ -32,6 +33,20 @@ void __init init_environ(void)
	else
		clear_bit(EFI_BOOT, &efi.flags);

	if (fw_arg2 == 0)
		goto parse_fdt;

	cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE);
	strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
	early_memunmap(cmdline, COMMAND_LINE_SIZE);

	efi_system_table = fw_arg2;
	return;

parse_fdt:
	fdt_ptr = early_memremap_ro(fw_arg1, SZ_64K);
	early_init_dt_scan(fdt_ptr);
	early_init_fdt_reserve_self();
	efi_system_table = efi_get_fdt_params(&data);
......
@@ -8,6 +8,7 @@
 */
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/export.h>
@@ -21,9 +22,29 @@
	.macro	EX insn, reg, src, offs
	.ex\@:	\insn	\reg, \src, \offs
-	.section __ex_table, "a"
-	PTR	.ex\@, fault
-	.previous
	_asm_extable .ex\@, fault
	.endm

	.macro EX_V insn, reg, src, offs
	parse_v __insn, \insn
	parse_v __offs, \offs
	parse_r __src, \src
	parse_vr __reg, \reg
	.ex\@:
	.word __insn << 22 | __offs << 10 | __src << 5 | __reg
	_asm_extable .ex\@, fault
	.endm

	.macro EX_XV insn, reg, src, offs
	parse_v __insn, \insn
	parse_v __offs, \offs
	parse_r __src, \src
	parse_xr __reg, \reg
	.ex\@:
	.word __insn << 22 | __offs << 10 | __src << 5 | __reg
	_asm_extable .ex\@, fault
	.endm

	.macro sc_save_fp base
@@ -146,6 +167,146 @@
	movgr2fcsr	fcsr0, \tmp0
	.endm
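/*
 * Note: the EX_V/EX_XV helpers above emit each vector load/store as a raw
 * .word (opcode << 22 | si12 offset << 10 | rj << 5 | vd/xd) rather than a
 * mnemonic, presumably so this file still assembles with toolchains that lack
 * LSX/LASX support; the 0xb0/0xb1 and 0xb2/0xb3 opcodes used below are the
 * 128-bit and 256-bit vector load/store forms respectively.
 */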
.macro sc_save_lsx base
EX_V 0xb1 $vr0, \base, (0 * LSX_REG_WIDTH)
EX_V 0xb1 $vr1, \base, (1 * LSX_REG_WIDTH)
EX_V 0xb1 $vr2, \base, (2 * LSX_REG_WIDTH)
EX_V 0xb1 $vr3, \base, (3 * LSX_REG_WIDTH)
EX_V 0xb1 $vr4, \base, (4 * LSX_REG_WIDTH)
EX_V 0xb1 $vr5, \base, (5 * LSX_REG_WIDTH)
EX_V 0xb1 $vr6, \base, (6 * LSX_REG_WIDTH)
EX_V 0xb1 $vr7, \base, (7 * LSX_REG_WIDTH)
EX_V 0xb1 $vr8, \base, (8 * LSX_REG_WIDTH)
EX_V 0xb1 $vr9, \base, (9 * LSX_REG_WIDTH)
EX_V 0xb1 $vr10, \base, (10 * LSX_REG_WIDTH)
EX_V 0xb1 $vr11, \base, (11 * LSX_REG_WIDTH)
EX_V 0xb1 $vr12, \base, (12 * LSX_REG_WIDTH)
EX_V 0xb1 $vr13, \base, (13 * LSX_REG_WIDTH)
EX_V 0xb1 $vr14, \base, (14 * LSX_REG_WIDTH)
EX_V 0xb1 $vr15, \base, (15 * LSX_REG_WIDTH)
EX_V 0xb1 $vr16, \base, (16 * LSX_REG_WIDTH)
EX_V 0xb1 $vr17, \base, (17 * LSX_REG_WIDTH)
EX_V 0xb1 $vr18, \base, (18 * LSX_REG_WIDTH)
EX_V 0xb1 $vr19, \base, (19 * LSX_REG_WIDTH)
EX_V 0xb1 $vr20, \base, (20 * LSX_REG_WIDTH)
EX_V 0xb1 $vr21, \base, (21 * LSX_REG_WIDTH)
EX_V 0xb1 $vr22, \base, (22 * LSX_REG_WIDTH)
EX_V 0xb1 $vr23, \base, (23 * LSX_REG_WIDTH)
EX_V 0xb1 $vr24, \base, (24 * LSX_REG_WIDTH)
EX_V 0xb1 $vr25, \base, (25 * LSX_REG_WIDTH)
EX_V 0xb1 $vr26, \base, (26 * LSX_REG_WIDTH)
EX_V 0xb1 $vr27, \base, (27 * LSX_REG_WIDTH)
EX_V 0xb1 $vr28, \base, (28 * LSX_REG_WIDTH)
EX_V 0xb1 $vr29, \base, (29 * LSX_REG_WIDTH)
EX_V 0xb1 $vr30, \base, (30 * LSX_REG_WIDTH)
EX_V 0xb1 $vr31, \base, (31 * LSX_REG_WIDTH)
.endm
.macro sc_restore_lsx base
EX_V 0xb0 $vr0, \base, (0 * LSX_REG_WIDTH)
EX_V 0xb0 $vr1, \base, (1 * LSX_REG_WIDTH)
EX_V 0xb0 $vr2, \base, (2 * LSX_REG_WIDTH)
EX_V 0xb0 $vr3, \base, (3 * LSX_REG_WIDTH)
EX_V 0xb0 $vr4, \base, (4 * LSX_REG_WIDTH)
EX_V 0xb0 $vr5, \base, (5 * LSX_REG_WIDTH)
EX_V 0xb0 $vr6, \base, (6 * LSX_REG_WIDTH)
EX_V 0xb0 $vr7, \base, (7 * LSX_REG_WIDTH)
EX_V 0xb0 $vr8, \base, (8 * LSX_REG_WIDTH)
EX_V 0xb0 $vr9, \base, (9 * LSX_REG_WIDTH)
EX_V 0xb0 $vr10, \base, (10 * LSX_REG_WIDTH)
EX_V 0xb0 $vr11, \base, (11 * LSX_REG_WIDTH)
EX_V 0xb0 $vr12, \base, (12 * LSX_REG_WIDTH)
EX_V 0xb0 $vr13, \base, (13 * LSX_REG_WIDTH)
EX_V 0xb0 $vr14, \base, (14 * LSX_REG_WIDTH)
EX_V 0xb0 $vr15, \base, (15 * LSX_REG_WIDTH)
EX_V 0xb0 $vr16, \base, (16 * LSX_REG_WIDTH)
EX_V 0xb0 $vr17, \base, (17 * LSX_REG_WIDTH)
EX_V 0xb0 $vr18, \base, (18 * LSX_REG_WIDTH)
EX_V 0xb0 $vr19, \base, (19 * LSX_REG_WIDTH)
EX_V 0xb0 $vr20, \base, (20 * LSX_REG_WIDTH)
EX_V 0xb0 $vr21, \base, (21 * LSX_REG_WIDTH)
EX_V 0xb0 $vr22, \base, (22 * LSX_REG_WIDTH)
EX_V 0xb0 $vr23, \base, (23 * LSX_REG_WIDTH)
EX_V 0xb0 $vr24, \base, (24 * LSX_REG_WIDTH)
EX_V 0xb0 $vr25, \base, (25 * LSX_REG_WIDTH)
EX_V 0xb0 $vr26, \base, (26 * LSX_REG_WIDTH)
EX_V 0xb0 $vr27, \base, (27 * LSX_REG_WIDTH)
EX_V 0xb0 $vr28, \base, (28 * LSX_REG_WIDTH)
EX_V 0xb0 $vr29, \base, (29 * LSX_REG_WIDTH)
EX_V 0xb0 $vr30, \base, (30 * LSX_REG_WIDTH)
EX_V 0xb0 $vr31, \base, (31 * LSX_REG_WIDTH)
.endm
.macro sc_save_lasx base
EX_XV 0xb3 $xr0, \base, (0 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr1, \base, (1 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr2, \base, (2 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr3, \base, (3 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr4, \base, (4 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr5, \base, (5 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr6, \base, (6 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr7, \base, (7 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr8, \base, (8 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr9, \base, (9 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr10, \base, (10 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr11, \base, (11 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr12, \base, (12 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr13, \base, (13 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr14, \base, (14 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr15, \base, (15 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr16, \base, (16 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr17, \base, (17 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr18, \base, (18 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr19, \base, (19 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr20, \base, (20 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr21, \base, (21 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr22, \base, (22 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr23, \base, (23 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr24, \base, (24 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr25, \base, (25 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr26, \base, (26 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr27, \base, (27 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr28, \base, (28 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr29, \base, (29 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr30, \base, (30 * LASX_REG_WIDTH)
EX_XV 0xb3 $xr31, \base, (31 * LASX_REG_WIDTH)
.endm
.macro sc_restore_lasx base
EX_XV 0xb2 $xr0, \base, (0 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr1, \base, (1 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr2, \base, (2 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr3, \base, (3 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr4, \base, (4 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr5, \base, (5 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr6, \base, (6 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr7, \base, (7 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr8, \base, (8 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr9, \base, (9 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr10, \base, (10 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr11, \base, (11 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr12, \base, (12 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr13, \base, (13 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr14, \base, (14 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr15, \base, (15 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr16, \base, (16 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr17, \base, (17 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr18, \base, (18 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr19, \base, (19 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr20, \base, (20 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr21, \base, (21 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr22, \base, (22 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr23, \base, (23 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr24, \base, (24 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr25, \base, (25 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr26, \base, (26 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr27, \base, (27 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr28, \base, (28 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr29, \base, (29 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr30, \base, (30 * LASX_REG_WIDTH)
EX_XV 0xb2 $xr31, \base, (31 * LASX_REG_WIDTH)
.endm
/*
 * Save a thread's fp context.
 */
@@ -167,6 +328,76 @@ SYM_FUNC_START(_restore_fp)
	jr	ra
SYM_FUNC_END(_restore_fp)
#ifdef CONFIG_CPU_HAS_LSX
/*
* Save a thread's LSX vector context.
*/
SYM_FUNC_START(_save_lsx)
lsx_save_all a0 t1 t2
jirl zero, ra, 0
SYM_FUNC_END(_save_lsx)
EXPORT_SYMBOL(_save_lsx)
/*
* Restore a thread's LSX vector context.
*/
SYM_FUNC_START(_restore_lsx)
lsx_restore_all a0 t1 t2
jirl zero, ra, 0
SYM_FUNC_END(_restore_lsx)
SYM_FUNC_START(_save_lsx_upper)
lsx_save_all_upper a0 t0 t1
jirl zero, ra, 0
SYM_FUNC_END(_save_lsx_upper)
SYM_FUNC_START(_restore_lsx_upper)
lsx_restore_all_upper a0 t0 t1
jirl zero, ra, 0
SYM_FUNC_END(_restore_lsx_upper)
SYM_FUNC_START(_init_lsx_upper)
lsx_init_all_upper t1
jirl zero, ra, 0
SYM_FUNC_END(_init_lsx_upper)
#endif
#ifdef CONFIG_CPU_HAS_LASX
/*
* Save a thread's LASX vector context.
*/
SYM_FUNC_START(_save_lasx)
lasx_save_all a0 t1 t2
jirl zero, ra, 0
SYM_FUNC_END(_save_lasx)
EXPORT_SYMBOL(_save_lasx)
/*
* Restore a thread's LASX vector context.
*/
SYM_FUNC_START(_restore_lasx)
lasx_restore_all a0 t1 t2
jirl zero, ra, 0
SYM_FUNC_END(_restore_lasx)
SYM_FUNC_START(_save_lasx_upper)
lasx_save_all_upper a0 t0 t1
jirl zero, ra, 0
SYM_FUNC_END(_save_lasx_upper)
SYM_FUNC_START(_restore_lasx_upper)
lasx_restore_all_upper a0 t0 t1
jirl zero, ra, 0
SYM_FUNC_END(_restore_lasx_upper)
SYM_FUNC_START(_init_lasx_upper)
lasx_init_all_upper t1
jirl zero, ra, 0
SYM_FUNC_END(_init_lasx_upper)
#endif
/*
 * Load the FPU with signalling NANS.  This bit pattern we're using has
 * the property that no matter whether considered as single or as double
@@ -245,6 +476,58 @@ SYM_FUNC_START(_restore_fp_context)
	jr	ra
SYM_FUNC_END(_restore_fp_context)
/*
* a0: fpregs
* a1: fcc
* a2: fcsr
*/
SYM_FUNC_START(_save_lsx_context)
sc_save_fcc a1, t0, t1
sc_save_fcsr a2, t0
sc_save_lsx a0
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_save_lsx_context)
/*
* a0: fpregs
* a1: fcc
* a2: fcsr
*/
SYM_FUNC_START(_restore_lsx_context)
sc_restore_lsx a0
sc_restore_fcc a1, t1, t2
sc_restore_fcsr a2, t1
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_restore_lsx_context)
/*
* a0: fpregs
* a1: fcc
* a2: fcsr
*/
SYM_FUNC_START(_save_lasx_context)
sc_save_fcc a1, t0, t1
sc_save_fcsr a2, t0
sc_save_lasx a0
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_save_lasx_context)
/*
* a0: fpregs
* a1: fcc
* a2: fcsr
*/
SYM_FUNC_START(_restore_lasx_context)
sc_restore_lasx a0
sc_restore_fcc a1, t1, t2
sc_restore_fcsr a2, t1
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_restore_lasx_context)
SYM_FUNC_START(fault)
	li.w	a0, -EFAULT	# failure
	jr	ra
......
@@ -34,6 +34,7 @@ SYM_DATA(kernel_offset, .long kernel_offset - _text);
	__REF

	.align	12
SYM_CODE_START(kernel_entry)			# kernel entry point
	/* Config direct window and set PG */
......
@@ -239,7 +239,11 @@ int setup_legacy_IRQ(void)
		printk("CPU domain init eror!\n");
		return -1;
	}
-	cpu_domain = get_cpudomain();
	cpu_domain = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
	if (!cpu_domain) {
		printk("CPU domain error!\n");
		return -1;
	}
	ret = liointc_acpi_init(cpu_domain, acpi_liointc);
	if (ret) {
		printk("Liointc domain init eror!\n");
@@ -269,7 +273,11 @@ int setup_legacy_IRQ(void)
		pch_msi_parse_madt((union acpi_subtable_headers *)acpi_pchmsi[0], 0);
	}
-	pic_domain = get_pchpic_irq_domain();
	pic_domain = irq_find_matching_fwnode(pch_pic_handle[0], DOMAIN_BUS_ANY);
	if (!pic_domain) {
		printk("Pic domain error!\n");
		return -1;
	}
	if (pic_domain)
		pch_lpc_acpi_init(pic_domain, acpi_pchlpc);
@@ -517,7 +525,7 @@ unsigned long legacy_boot_init(unsigned long argc, unsigned long cmdptr, unsigne
{
	int ret;

-	if (!bpi)
	if (!bpi || (argc < 2))
		return -1;
	efi_bp = (struct boot_params *)bpi;
	bpi_version = get_bpi_version(&efi_bp->signature);
......
@@ -17,6 +17,7 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/alternative.h>

static inline bool signed_imm_check(long val, unsigned int bit)
{
@@ -466,3 +467,17 @@ void *module_alloc(unsigned long size)
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
			GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *mod)
{
const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
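	/* Scan the module's sections and patch any .altinstructions payload it
	 * ships, mirroring what alternative_instructions() does for the core
	 * kernel image at boot. */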
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
if (!strcmp(".altinstructions", secstrs + s->sh_name))
apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size);
}
return 0;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
* Copyright (C) 2020 Loongson Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/smp.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <asm/bootinfo.h>
#include <asm/loongson.h>
extern int loongson_acpi_init(void);
static int __init loongson3_acpi_suspend_init(void)
{
#ifdef CONFIG_ACPI
acpi_status status;
unsigned long long suspend_addr = 0;
if (acpi_disabled || acpi_gbl_reduced_hardware)
return 0;
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
status = acpi_evaluate_integer(NULL, "\\SADR", NULL, &suspend_addr);
if (ACPI_FAILURE(status) || !suspend_addr) {
pr_err("ACPI S3 is not support!\n");
return -1;
}
loongson_sysconf.suspend_addr = (u64)phys_to_virt(TO_PHYS(suspend_addr));
#endif
return 0;
}
device_initcall(loongson3_acpi_suspend_init);
@@ -105,8 +105,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
	 */
	preempt_disable();

-	if (is_fpu_owner())
-		save_fp(current);
	if (is_fpu_owner()) {
		if (is_lasx_enabled())
			save_lasx(current);
		else if (is_lsx_enabled())
			save_lsx(current);
		else
			save_fp(current);
	}

	preempt_enable();
......
@@ -246,6 +246,90 @@ static int cfg_set(struct task_struct *target,
	return 0;
}
#ifdef CONFIG_CPU_HAS_LSX
static void copy_pad_fprs(struct task_struct *target,
const struct user_regset *regset,
struct membuf *to, unsigned int live_sz)
{
int i, j;
unsigned long long fill = ~0ull;
unsigned int cp_sz, pad_sz;
cp_sz = min(regset->size, live_sz);
pad_sz = regset->size - cp_sz;
WARN_ON(pad_sz % sizeof(fill));
for (i = 0; i < NUM_FPU_REGS; i++) {
membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
membuf_store(to, fill);
}
}
}
static int simd_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const unsigned int wr_size = NUM_FPU_REGS * regset->size;
if (!tsk_used_math(target)) {
/* The task hasn't used FP or LSX, fill with 0xff */
copy_pad_fprs(target, regset, &to, 0);
} else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
/* Copy scalar FP context, fill the rest with 0xff */
copy_pad_fprs(target, regset, &to, 8);
#ifdef CONFIG_CPU_HAS_LASX
} else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
/* Copy LSX 128 Bit context, fill the rest with 0xff */
copy_pad_fprs(target, regset, &to, 16);
#endif
} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
/* Trivially copy the vector registers */
membuf_write(&to, &target->thread.fpu.fpr, wr_size);
} else {
/* Copy as much context as possible, fill the rest with 0xff */
copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
}
return 0;
}
static int simd_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
const unsigned int wr_size = NUM_FPU_REGS * regset->size;
unsigned int cp_sz;
int i, err, start;
init_fp_ctx(target);
if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
/* Trivially copy the vector registers */
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu.fpr,
0, wr_size);
} else {
/* Copy as much context as possible */
cp_sz = min_t(unsigned int, regset->size,
sizeof(target->thread.fpu.fpr[0]));
i = start = err = 0;
for (; i < NUM_FPU_REGS; i++, start += regset->size) {
err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu.fpr[i],
start, start + cp_sz);
}
}
return err;
}
#endif /* CONFIG_CPU_HAS_LSX */
struct pt_regs_offset {
	const char *name;
	int offset;
@@ -319,6 +403,12 @@ enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
	REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
	REGSET_LASX,
#endif
};

static const struct user_regset loongarch64_regsets[] = {
@@ -346,6 +436,26 @@ static const struct user_regset loongarch64_regsets[] = {
		.regset_get	= cfg_get,
		.set		= cfg_set,
	},
#ifdef CONFIG_CPU_HAS_LSX
[REGSET_LSX] = {
.core_note_type = NT_LOONGARCH_LSX,
.n = NUM_FPU_REGS,
.size = 16,
.align = 16,
.regset_get = simd_get,
.set = simd_set,
},
#endif
#ifdef CONFIG_CPU_HAS_LASX
[REGSET_LASX] = {
.core_note_type = NT_LOONGARCH_LASX,
.n = NUM_FPU_REGS,
.size = 32,
.align = 32,
.regset_get = simd_get,
.set = simd_set,
},
#endif
};

static const struct user_regset_view user_loongarch64_view = {
......
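The LSX/LASX regsets above are reached from userspace with ptrace(PTRACE_GETREGSET); a minimal sketch of dumping a stopped tracee's LSX state (it assumes an elf.h that already defines NT_LOONGARCH_LSX, and that `pid` has been attached and stopped elsewhere):

#include <stdio.h>
#include <stdint.h>
#include <elf.h>		/* NT_LOONGARCH_LSX */
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/ptrace.h>

/* 32 vector registers, 16 bytes each, matching the regset above (.n = 32, .size = 16) */
static uint8_t lsx[32][16];

int dump_lsx(pid_t pid)
{
	struct iovec iov = { .iov_base = lsx, .iov_len = sizeof(lsx) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_LOONGARCH_LSX, &iov) != 0) {
		perror("PTRACE_GETREGSET");
		return -1;
	}
	printf("got %zu bytes of LSX state\n", iov.iov_len);
	return 0;
}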
@@ -29,7 +29,9 @@
#include <linux/swiotlb.h>

#include <asm/addrspace.h>
#include <asm/alternative.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/dma.h>
@@ -79,6 +81,11 @@ const char *get_system_type(void)
	return "generic-loongson-machine";
}
void __init check_bugs(void)
{
alternative_instructions();
}
static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
{
	const u8 *bp = ((u8 *) dm) + dm->length;
@@ -198,10 +205,24 @@ static int __init early_parse_mem(char *p)
	return 0;
}
early_param("mem", early_parse_mem);
static void __init set_pcie_wakeup(void)
{
acpi_status status;
u32 value;
if (loongson_sysconf.is_soc_cpu || acpi_gbl_reduced_hardware)
return;
status = acpi_read_bit_register(ACPI_BITREG_PCIEXP_WAKE_DISABLE, &value);
if (ACPI_FAILURE(status)) {
return;
}
loongson_sysconf.pcie_wake_enabled = !value;
}
void __init platform_init(void)
{
-	loongson_efi_init();
#ifdef CONFIG_ACPI_TABLE_UPGRADE
	acpi_table_upgrade();
#endif
@@ -210,6 +231,7 @@ void __init platform_init(void)
	acpi_boot_table_init();
	acpi_boot_init();
#endif
	set_pcie_wakeup();

#ifdef CONFIG_NUMA
	init_numa_memory();
@@ -362,6 +384,7 @@ void __init setup_arch(char **cmdline_p)
	legacy_boot_init(fw_arg0, fw_arg1, fw_arg2);

	init_environ();
	loongson_efi_init();
	memblock_init();
	pagetable_init();
	parse_early_param();
......
@@ -50,6 +50,16 @@ extern asmlinkage int
_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern asmlinkage int
_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern asmlinkage int
_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int _save_lsx_all_upper(void __user *buf);
extern asmlinkage int _restore_lsx_all_upper(void __user *buf);

struct rt_sigframe {
	struct siginfo rs_info;
@@ -65,9 +75,29 @@ struct extctx_layout {
	unsigned long size;
	unsigned int flags;
	struct _ctx_layout fpu;
	struct _ctx_layout lsx;
	struct _ctx_layout lasx;
	struct _ctx_layout end;
};
/* LSX context */
#define LSX_CTX_MAGIC 0x53580001
#define LSX_CTX_ALIGN 16
struct lsx_context {
__u64 regs[2*32];
__u64 fcc;
__u32 fcsr;
};
/* LASX context */
#define LASX_CTX_MAGIC 0x41535801
#define LASX_CTX_ALIGN 32
struct lasx_context {
__u64 regs[4*32];
__u64 fcc;
__u32 fcsr;
};
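The magic/size records written with these structures form a chain of extension blocks after the basic sigcontext. A hedged userspace sketch of locating the LSX block from a SA_SIGINFO handler follows; it assumes the matching uapi definitions in <asm/sigcontext.h> (struct sctx_info, struct lsx_context, LSX_CTX_MAGIC, the sc_extcontext[] member), that glibc's mcontext_t is layout-compatible with the kernel struct sigcontext, and that the chain ends with a zero magic:

#include <signal.h>
#include <stdio.h>
#include <ucontext.h>
#include <asm/sigcontext.h>	/* struct sctx_info, struct lsx_context, LSX_CTX_MAGIC */

static void handler(int sig, siginfo_t *si, void *uc_v)
{
	ucontext_t *uc = uc_v;
	/* Assumption: the kernel wrote a struct sigcontext here, so the cast is safe. */
	struct sigcontext *sc = (struct sigcontext *)&uc->uc_mcontext;
	struct sctx_info *info = (struct sctx_info *)sc->sc_extcontext;

	/* Walk the extension records until the terminating zero magic. */
	while (info->magic != 0) {
		if (info->magic == LSX_CTX_MAGIC) {
			struct lsx_context *lsx = (struct lsx_context *)(info + 1);
			/* printf is not async-signal-safe; fine for a demo only. */
			printf("vr0 low 64 bits: %llx\n", (unsigned long long)lsx->regs[0]);
			break;
		}
		info = (struct sctx_info *)((char *)info + info->size);
	}
}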
static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
{
	return (void __user *)((char *)info + sizeof(struct sctx_info));
@@ -115,6 +145,96 @@ static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
	return err;
}
static int copy_lsx_to_sigcontext(struct lsx_context __user *ctx)
{
int i;
int err = 0;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
&regs[2*i]);
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
&regs[2*i+1]);
}
err |= __put_user(current->thread.fpu.fcc, fcc);
err |= __put_user(current->thread.fpu.fcsr, fcsr);
return err;
}
static int copy_lsx_from_sigcontext(struct lsx_context __user *ctx)
{
int i;
int err = 0;
u64 fpr_val;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |= __get_user(fpr_val, &regs[2*i]);
set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
err |= __get_user(fpr_val, &regs[2*i+1]);
set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
}
err |= __get_user(current->thread.fpu.fcc, fcc);
err |= __get_user(current->thread.fpu.fcsr, fcsr);
return err;
}
static int copy_lasx_to_sigcontext(struct lasx_context __user *ctx)
{
int i;
int err = 0;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
&regs[4*i]);
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
&regs[4*i+1]);
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 2),
&regs[4*i+2]);
err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 3),
&regs[4*i+3]);
}
err |= __put_user(current->thread.fpu.fcc, fcc);
err |= __put_user(current->thread.fpu.fcsr, fcsr);
return err;
}
static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
{
int i;
int err = 0;
u64 fpr_val;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |= __get_user(fpr_val, &regs[4*i]);
set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
err |= __get_user(fpr_val, &regs[4*i+1]);
set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
err |= __get_user(fpr_val, &regs[4*i+2]);
set_fpr64(&current->thread.fpu.fpr[i], 2, fpr_val);
err |= __get_user(fpr_val, &regs[4*i+3]);
set_fpr64(&current->thread.fpu.fpr[i], 3, fpr_val);
}
err |= __get_user(current->thread.fpu.fcc, fcc);
err |= __get_user(current->thread.fpu.fcsr, fcsr);
return err;
}
/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
@@ -136,6 +256,42 @@ static int restore_hw_fpu_context(struct fpu_context __user *ctx)
	return _restore_fp_context(regs, fcc, fcsr);
}
static int save_hw_lsx_context(struct lsx_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
return _save_lsx_context(regs, fcc, fcsr);
}
static int restore_hw_lsx_context(struct lsx_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
return _restore_lsx_context(regs, fcc, fcsr);
}
static int save_hw_lasx_context(struct lasx_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
return _save_lasx_context(regs, fcc, fcsr);
}
static int restore_hw_lasx_context(struct lasx_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
return _restore_lasx_context(regs, fcc, fcsr);
}
static int fcsr_pending(unsigned int __user *fcsr)
{
	int err, sig = 0;
@@ -227,6 +383,146 @@ static int protected_restore_fpu_context(struct extctx_layout *extctx)
	return err ?: sig;
}
static int protected_save_lsx_context(struct extctx_layout *extctx)
{
int err = 0;
struct sctx_info __user *info = extctx->lsx.addr;
struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
uint64_t __user *fcc = &lsx_ctx->fcc;
uint32_t __user *fcsr = &lsx_ctx->fcsr;
while (1) {
lock_fpu_owner();
if (is_lsx_enabled())
err = save_hw_lsx_context(lsx_ctx);
else
err = copy_lsx_to_sigcontext(lsx_ctx);
unlock_fpu_owner();
err |= __put_user(LSX_CTX_MAGIC, &info->magic);
err |= __put_user(extctx->lsx.size, &info->size);
if (likely(!err))
break;
/* Touch the LSX context and try again */
err = __put_user(0, &regs[0]) |
__put_user(0, &regs[32*2-1]) |
__put_user(0, fcc) |
__put_user(0, fcsr);
if (err)
return err; /* really bad sigcontext */
}
return err;
}
static int protected_restore_lsx_context(struct extctx_layout *extctx)
{
int err = 0, sig = 0, tmp __maybe_unused;
struct sctx_info __user *info = extctx->lsx.addr;
struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
uint64_t __user *fcc = &lsx_ctx->fcc;
uint32_t __user *fcsr = &lsx_ctx->fcsr;
err = sig = fcsr_pending(fcsr);
if (err < 0)
return err;
while (1) {
lock_fpu_owner();
if (is_lsx_enabled())
err = restore_hw_lsx_context(lsx_ctx);
else
err = copy_lsx_from_sigcontext(lsx_ctx);
unlock_fpu_owner();
if (likely(!err))
break;
/* Touch the LSX context and try again */
err = __get_user(tmp, &regs[0]) |
__get_user(tmp, &regs[32*2-1]) |
__get_user(tmp, fcc) |
__get_user(tmp, fcsr);
if (err)
break; /* really bad sigcontext */
}
return err ?: sig;
}
static int protected_save_lasx_context(struct extctx_layout *extctx)
{
int err = 0;
struct sctx_info __user *info = extctx->lasx.addr;
struct lasx_context __user *lasx_ctx =
(struct lasx_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
uint64_t __user *fcc = &lasx_ctx->fcc;
uint32_t __user *fcsr = &lasx_ctx->fcsr;
while (1) {
lock_fpu_owner();
if (is_lasx_enabled())
err = save_hw_lasx_context(lasx_ctx);
else
err = copy_lasx_to_sigcontext(lasx_ctx);
unlock_fpu_owner();
err |= __put_user(LASX_CTX_MAGIC, &info->magic);
err |= __put_user(extctx->lasx.size, &info->size);
if (likely(!err))
break;
/* Touch the LASX context and try again */
err = __put_user(0, &regs[0]) |
__put_user(0, &regs[32*4-1]) |
__put_user(0, fcc) |
__put_user(0, fcsr);
if (err)
return err; /* really bad sigcontext */
}
return err;
}
static int protected_restore_lasx_context(struct extctx_layout *extctx)
{
int err = 0, sig = 0, tmp __maybe_unused;
struct sctx_info __user *info = extctx->lasx.addr;
struct lasx_context __user *lasx_ctx =
(struct lasx_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
uint64_t __user *fcc = &lasx_ctx->fcc;
uint32_t __user *fcsr = &lasx_ctx->fcsr;
err = sig = fcsr_pending(fcsr);
if (err < 0)
return err;
while (1) {
lock_fpu_owner();
if (is_lasx_enabled())
err = restore_hw_lasx_context(lasx_ctx);
else
err = copy_lasx_from_sigcontext(lasx_ctx);
unlock_fpu_owner();
if (likely(!err))
break;
/* Touch the LASX context and try again */
err = __get_user(tmp, &regs[0]) |
__get_user(tmp, &regs[32*4-1]) |
__get_user(tmp, fcc) |
__get_user(tmp, fcsr);
if (err)
break; /* really bad sigcontext */
}
return err ?: sig;
}
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
			struct extctx_layout *extctx)
{
@@ -240,7 +536,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

-	if (extctx->fpu.addr)
	if (extctx->lasx.addr)
		err |= protected_save_lasx_context(extctx);
	else if (extctx->lsx.addr)
		err |= protected_save_lsx_context(extctx);
	else if (extctx->fpu.addr)
		err |= protected_save_fpu_context(extctx);

	/* Set the "end" magic */
@@ -274,6 +574,20 @@ static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *
		extctx->fpu.addr = info;
		break;
case LSX_CTX_MAGIC:
if (size < (sizeof(struct sctx_info) +
sizeof(struct lsx_context)))
goto invalid;
extctx->lsx.addr = info;
break;
case LASX_CTX_MAGIC:
if (size < (sizeof(struct sctx_info) +
sizeof(struct lasx_context)))
goto invalid;
extctx->lasx.addr = info;
break;
	default:
		goto invalid;
	}
@@ -319,7 +633,11 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc
	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

-	if (extctx.fpu.addr)
	if (extctx.lasx.addr)
		err |= protected_restore_lasx_context(&extctx);
	else if (extctx.lsx.addr)
		err |= protected_restore_lsx_context(&extctx);
	else if (extctx.fpu.addr)
		err |= protected_restore_fpu_context(&extctx);

bad:
@@ -375,7 +693,13 @@ static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned lon
	extctx->size += extctx->end.size;

	if (extctx->flags & SC_USED_FP) {
-		if (cpu_has_fpu)
		if (cpu_has_lasx && thread_lasx_context_live())
			new_sp = extframe_alloc(extctx, &extctx->lasx,
				sizeof(struct lasx_context), LASX_CTX_ALIGN, new_sp);
		else if (cpu_has_lsx && thread_lsx_context_live())
			new_sp = extframe_alloc(extctx, &extctx->lsx,
				sizeof(struct lsx_context), LSX_CTX_ALIGN, new_sp);
		else if (cpu_has_fpu)
			new_sp = extframe_alloc(extctx, &extctx->fpu,
				sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
	}
......
@@ -17,6 +17,7 @@
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/syscore_ops.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
......
@@ -16,9 +16,6 @@
 */
	.align	5
SYM_FUNC_START(__switch_to)
-	csrrd	t1, LOONGARCH_CSR_PRMD
-	stptr.d	t1, a0, THREAD_CSRPRMD
-
	cpu_save_nonscratch a0
	stptr.d	ra, a0, THREAD_REG01
	stptr.d	a3, a0, THREAD_SCHED_RA
@@ -30,8 +27,5 @@ SYM_FUNC_START(__switch_to)
	PTR_ADD	t0, t0, tp
	set_saved_sp	t0, t1, t2

-	ldptr.d	t1, a1, THREAD_CSRPRMD
-	csrwr	t1, LOONGARCH_CSR_PRMD
-
	jr	ra
SYM_FUNC_END(__switch_to)
@@ -37,18 +37,16 @@ void *sys_call_table[__NR_syscalls] = {
typedef long (*sys_call_fn)(unsigned long, unsigned long,
	unsigned long, unsigned long, unsigned long, unsigned long);

-void noinstr do_syscall(struct pt_regs *regs)
unsigned long noinstr do_syscall(struct pt_regs *regs, unsigned long nr)
{
-	unsigned long nr;
	sys_call_fn syscall_fn;

-	nr = regs->regs[11];
	/* Set for syscall restarting */
	if (nr < NR_syscalls)
		regs->regs[0] = nr + 1;
-	else
-		regs->regs[0] = 0;
-
-	regs->csr_era += 4;
-	regs->orig_a0 = regs->regs[4];
	regs->regs[4] = -ENOSYS;

	nr = syscall_enter_from_user_mode(regs, nr);
@@ -60,4 +58,6 @@ void noinstr do_syscall(struct pt_regs *regs)
	}

	syscall_exit_to_user_mode(regs);

	return nr;
}
@@ -115,7 +115,12 @@ static unsigned long __init get_loops_per_jiffy(void)
	return lpj;
}

-static long init_timeval;
static long init_timeval __nosavedata;

void save_counter(void)
{
	init_timeval = drdtime();
}

void sync_counter(void)
{
......
@@ -514,12 +514,67 @@ static void init_restore_fp(void)
	BUG_ON(!is_fp_enabled());
}
static void init_restore_lsx(void)
{
enable_lsx();
if (!thread_lsx_context_live()) {
/* First time LSX context user */
init_restore_fp();
init_lsx_upper();
set_thread_flag(TIF_LSX_CTX_LIVE);
} else {
if (!is_simd_owner()) {
if (is_fpu_owner()) {
restore_lsx_upper(current);
} else {
__own_fpu();
restore_lsx(current);
}
}
}
set_thread_flag(TIF_USEDSIMD);
BUG_ON(!is_fp_enabled());
BUG_ON(!is_lsx_enabled());
}
static void init_restore_lasx(void)
{
enable_lasx();
if (!thread_lasx_context_live()) {
/* First time LASX context user */
init_restore_lsx();
init_lasx_upper();
set_thread_flag(TIF_LASX_CTX_LIVE);
} else {
if (is_fpu_owner() || is_simd_owner()) {
init_restore_lsx();
restore_lasx_upper(current);
} else {
__own_fpu();
enable_lsx();
restore_lasx(current);
}
}
set_thread_flag(TIF_USEDSIMD);
BUG_ON(!is_fp_enabled());
BUG_ON(!is_lsx_enabled());
BUG_ON(!is_lasx_enabled());
}
asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);
	BUG_ON(is_lsx_enabled());
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_fp();
@@ -534,7 +589,19 @@ asmlinkage void noinstr do_lsx(struct pt_regs *regs)
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
-	force_sig(SIGILL);
	if (!cpu_has_lsx) {
force_sig(SIGILL);
goto out;
}
die_if_kernel("do_lsx invoked from kernel context!", regs);
BUG_ON(is_lasx_enabled());
preempt_disable();
init_restore_lsx();
preempt_enable();
out:
	local_irq_disable();
	irqentry_exit(regs, state);
@@ -545,7 +612,18 @@ asmlinkage void noinstr do_lasx(struct pt_regs *regs)
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
-	force_sig(SIGILL);
	if (!cpu_has_lasx) {
force_sig(SIGILL);
goto out;
}
die_if_kernel("do_lasx invoked from kernel context!", regs);
preempt_disable();
init_restore_lasx();
preempt_enable();
out:
local_irq_disable(); local_irq_disable();
irqentry_exit(regs, state); irqentry_exit(regs, state);
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <asm/thread_info.h> #include <asm/thread_info.h>
#define PAGE_SIZE _PAGE_SIZE #define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
/* /*
* Put .bss..swapper_pg_dir as the first thing in .bss. This will * Put .bss..swapper_pg_dir as the first thing in .bss. This will
...@@ -53,7 +54,17 @@ SECTIONS ...@@ -53,7 +54,17 @@ SECTIONS
. = ALIGN(PECOFF_SEGMENT_ALIGN); . = ALIGN(PECOFF_SEGMENT_ALIGN);
_etext = .; _etext = .;
EXCEPTION_TABLE(16) /*
* struct alt_inst entries. From the header (alternative.h):
* "Alternative instructions for different CPU types or capabilities"
* Think locking instructions on spinlocks.
*/
. = ALIGN(4);
.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
__alt_instructions = .;
*(.altinstructions)
__alt_instructions_end = .;
}
. = ALIGN(PECOFF_SEGMENT_ALIGN); . = ALIGN(PECOFF_SEGMENT_ALIGN);
__init_begin = .; __init_begin = .;
......
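For readers unfamiliar with the mechanism the new .altinstructions output section supports: at boot the kernel walks the records collected between __alt_instructions and __alt_instructions_end and, wherever the running CPU advertises the matching feature (for example CPU_FEATURE_UAL in the library routines further down), it overwrites the original instruction words with the replacement ones, so the feature check costs nothing at runtime. The sketch below only illustrates that idea; the record layout and helper names are invented for the example and do not match the real LoongArch alternatives code.

/* Illustrative model of boot-time "alternatives" patching (hypothetical
 * types and helpers; not the kernel's struct alt_instr). */
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

struct demo_alt_record {
	uint32_t *orig;		/* instruction word(s) to patch in place        */
	const uint32_t *repl;	/* replacement emitted by the ALTERNATIVE macro */
	unsigned int feature;	/* CPU feature bit gating the replacement       */
	unsigned int nwords;	/* length of the replacement, in 32-bit words   */
};

static bool demo_cpu_has(unsigned int feature)
{
	return feature == 0;	/* stand-in for a real cpu_has_*() test */
}

static void demo_apply_alternatives(struct demo_alt_record *start,
				    struct demo_alt_record *end)
{
	for (struct demo_alt_record *a = start; a < end; a++) {
		if (!demo_cpu_has(a->feature))
			continue;			/* keep the original code path */
		memcpy(a->orig, a->repl, a->nwords * sizeof(uint32_t));
		/* a real implementation would also synchronize the I-cache here */
	}
}

The assembly files below use this through ALTERNATIVE "b __xxx_generic", "b __xxx_fast", CPU_FEATURE_UAL, so CPUs with hardware unaligned access are redirected to the fast variants once at boot.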
...@@ -3,4 +3,5 @@ ...@@ -3,4 +3,5 @@
# Makefile for LoongArch-specific library files. # Makefile for LoongArch-specific library files.
# #
lib-y += delay.o clear_user.o strnlen_user.o strncpy_user.o copy_user.o dump_tlb.o lib-y += delay.o clear_user.o strnlen_user.o strncpy_user.o copy_user.o dump_tlb.o \
memset.o memcpy.o memmove.o
...@@ -3,30 +3,37 @@ ...@@ -3,30 +3,37 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/ */
#include <asm/alternative-asm.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/asmmacro.h> #include <asm/asmmacro.h>
#include <asm/asm-extable.h>
#include <asm/cpu.h>
#include <asm/export.h> #include <asm/export.h>
#include <asm/regdef.h> #include <asm/regdef.h>
.macro fixup_ex from, to, offset, fix .irp to, 0, 1, 2, 3, 4, 5, 6, 7
.if \fix .L_fixup_handle_\to\():
.section .fixup, "ax" addi.d a0, a1, (\to) * (-8)
\to: addi.d a0, a1, \offset
jr ra jr ra
.previous .endr
.endif
.section __ex_table, "a" SYM_FUNC_START(__clear_user)
PTR \from\()b, \to\()b /*
.previous * Some CPUs support hardware unaligned access
.endm */
ALTERNATIVE "b __clear_user_generic", \
"b __clear_user_fast", CPU_FEATURE_UAL
SYM_FUNC_END(__clear_user)
EXPORT_SYMBOL(__clear_user)
/* /*
* unsigned long __clear_user(void *addr, size_t size) * unsigned long __clear_user_generic(void *addr, size_t size)
* *
* a0: addr * a0: addr
* a1: size * a1: size
*/ */
SYM_FUNC_START(__clear_user) SYM_FUNC_START(__clear_user_generic)
beqz a1, 2f beqz a1, 2f
1: st.b zero, a0, 0 1: st.b zero, a0, 0
...@@ -37,7 +44,55 @@ SYM_FUNC_START(__clear_user) ...@@ -37,7 +44,55 @@ SYM_FUNC_START(__clear_user)
2: move a0, a1 2: move a0, a1
jr ra jr ra
fixup_ex 1, 3, 0, 1 _asm_extable 1b, .L_fixup_handle_0
SYM_FUNC_END(__clear_user) SYM_FUNC_END(__clear_user_generic)
EXPORT_SYMBOL(__clear_user) /*
* unsigned long __clear_user_fast(void *addr, unsigned long size)
*
* a0: addr
* a1: size
*/
SYM_FUNC_START(__clear_user_fast)
beqz a1, 10f
ori a2, zero, 64
blt a1, a2, 9f
/* set 64 bytes at a time */
1: st.d zero, a0, 0
2: st.d zero, a0, 8
3: st.d zero, a0, 16
4: st.d zero, a0, 24
5: st.d zero, a0, 32
6: st.d zero, a0, 40
7: st.d zero, a0, 48
8: st.d zero, a0, 56
addi.d a0, a0, 64
addi.d a1, a1, -64
bge a1, a2, 1b
beqz a1, 10f
/* set the remaining bytes */
9: st.b zero, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, -1
bgt a1, zero, 9b
/* return */
10: move a0, a1
jr ra
/* fixup and ex_table */
_asm_extable 1b, .L_fixup_handle_0
_asm_extable 2b, .L_fixup_handle_1
_asm_extable 3b, .L_fixup_handle_2
_asm_extable 4b, .L_fixup_handle_3
_asm_extable 5b, .L_fixup_handle_4
_asm_extable 6b, .L_fixup_handle_5
_asm_extable 7b, .L_fixup_handle_6
_asm_extable 8b, .L_fixup_handle_7
_asm_extable 9b, .L_fixup_handle_0
SYM_FUNC_END(__clear_user_fast)
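A note on the fixup handlers generated by the .irp block at the top of this file: each .L_fixup_handle_N returns a1 minus N*8, i.e. the number of bytes that had not yet been cleared when the store N slots into the current 64-byte group faulted. A small worked model (illustrative C, not kernel code):

/* Worked example of the fixup arithmetic: with 200 bytes remaining (a1 = 200),
 * a fault on the store at offset 24 (label 4) lands in .L_fixup_handle_3,
 * which returns 200 - 3 * 8 = 176 bytes left unwritten. */
#include <stddef.h>

static size_t demo_fixup_remaining(size_t remaining, unsigned int handler_index)
{
	return remaining - handler_index * 8;
}

The generic byte-at-a-time variant only ever needs .L_fixup_handle_0, since a1 is decremented after every successful byte.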
...@@ -3,31 +3,38 @@ ...@@ -3,31 +3,38 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/ */
#include <asm/alternative-asm.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/asmmacro.h> #include <asm/asmmacro.h>
#include <asm/asm-extable.h>
#include <asm/cpu.h>
#include <asm/export.h> #include <asm/export.h>
#include <asm/regdef.h> #include <asm/regdef.h>
.macro fixup_ex from, to, offset, fix .irp to, 0, 1, 2, 3, 4, 5, 6, 7
.if \fix .L_fixup_handle_\to\():
.section .fixup, "ax" addi.d a0, a2, (\to) * (-8)
\to: addi.d a0, a2, \offset
jr ra jr ra
.previous .endr
.endif
.section __ex_table, "a" SYM_FUNC_START(__copy_user)
PTR \from\()b, \to\()b /*
.previous * Some CPUs support hardware unaligned access
.endm */
ALTERNATIVE "b __copy_user_generic", \
"b __copy_user_fast", CPU_FEATURE_UAL
SYM_FUNC_END(__copy_user)
EXPORT_SYMBOL(__copy_user)
/* /*
* unsigned long __copy_user(void *to, const void *from, size_t n) * unsigned long __copy_user_generic(void *to, const void *from, size_t n)
* *
* a0: to * a0: to
* a1: from * a1: from
* a2: n * a2: n
*/ */
SYM_FUNC_START(__copy_user) SYM_FUNC_START(__copy_user_generic)
beqz a2, 3f beqz a2, 3f
1: ld.b t0, a1, 0 1: ld.b t0, a1, 0
...@@ -40,8 +47,77 @@ SYM_FUNC_START(__copy_user) ...@@ -40,8 +47,77 @@ SYM_FUNC_START(__copy_user)
3: move a0, a2 3: move a0, a2
jr ra jr ra
fixup_ex 1, 4, 0, 1 _asm_extable 1b, .L_fixup_handle_0
fixup_ex 2, 4, 0, 0 _asm_extable 2b, .L_fixup_handle_0
SYM_FUNC_END(__copy_user) SYM_FUNC_END(__copy_user_generic)
EXPORT_SYMBOL(__copy_user) /*
* unsigned long __copy_user_fast(void *to, const void *from, unsigned long n)
*
* a0: to
* a1: from
* a2: n
*/
SYM_FUNC_START(__copy_user_fast)
beqz a2, 19f
ori a3, zero, 64
blt a2, a3, 17f
/* copy 64 bytes at a time */
1: ld.d t0, a1, 0
2: ld.d t1, a1, 8
3: ld.d t2, a1, 16
4: ld.d t3, a1, 24
5: ld.d t4, a1, 32
6: ld.d t5, a1, 40
7: ld.d t6, a1, 48
8: ld.d t7, a1, 56
9: st.d t0, a0, 0
10: st.d t1, a0, 8
11: st.d t2, a0, 16
12: st.d t3, a0, 24
13: st.d t4, a0, 32
14: st.d t5, a0, 40
15: st.d t6, a0, 48
16: st.d t7, a0, 56
addi.d a0, a0, 64
addi.d a1, a1, 64
addi.d a2, a2, -64
bge a2, a3, 1b
beqz a2, 19f
/* copy the remaining bytes */
17: ld.b t0, a1, 0
18: st.b t0, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, 1
addi.d a2, a2, -1
bgt a2, zero, 17b
/* return */
19: move a0, a2
jr ra
/* fixup and ex_table */
_asm_extable 1b, .L_fixup_handle_0
_asm_extable 2b, .L_fixup_handle_1
_asm_extable 3b, .L_fixup_handle_2
_asm_extable 4b, .L_fixup_handle_3
_asm_extable 5b, .L_fixup_handle_4
_asm_extable 6b, .L_fixup_handle_5
_asm_extable 7b, .L_fixup_handle_6
_asm_extable 8b, .L_fixup_handle_7
_asm_extable 9b, .L_fixup_handle_0
_asm_extable 10b, .L_fixup_handle_1
_asm_extable 11b, .L_fixup_handle_2
_asm_extable 12b, .L_fixup_handle_3
_asm_extable 13b, .L_fixup_handle_4
_asm_extable 14b, .L_fixup_handle_5
_asm_extable 15b, .L_fixup_handle_6
_asm_extable 16b, .L_fixup_handle_7
_asm_extable 17b, .L_fixup_handle_0
_asm_extable 18b, .L_fixup_handle_0
SYM_FUNC_END(__copy_user_fast)
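As with __clear_user, these routines return the number of bytes that could not be transferred, which is the convention the arch's raw_copy_from_user()/raw_copy_to_user() wrappers and ultimately copy_from_user() rely on. A typical caller pattern at the C level (the surrounding driver context is hypothetical; only the return-value convention is the point):

/* Sketch of a typical uaccess caller: any non-zero return from
 * copy_from_user() means some bytes faulted, and is reported as -EFAULT. */
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_params {
	u32 flags;
	u64 addr;
};

static int demo_get_params(struct demo_params *dst, const void __user *src)
{
	/* copy_from_user() returns the number of bytes NOT copied */
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;
	return 0;
}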
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
SYM_FUNC_START(memcpy)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __memcpy_generic", \
"b __memcpy_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memcpy)
EXPORT_SYMBOL(memcpy)
/*
* void *__memcpy_generic(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__memcpy_generic)
move a3, a0
beqz a2, 2f
1: ld.b t0, a1, 0
st.b t0, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, 1
addi.d a2, a2, -1
bgt a2, zero, 1b
2: move a0, a3
jr ra
SYM_FUNC_END(__memcpy_generic)
/*
* void *__memcpy_fast(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__memcpy_fast)
move a3, a0
beqz a2, 3f
ori a4, zero, 64
blt a2, a4, 2f
/* copy 64 bytes at a time */
1: ld.d t0, a1, 0
ld.d t1, a1, 8
ld.d t2, a1, 16
ld.d t3, a1, 24
ld.d t4, a1, 32
ld.d t5, a1, 40
ld.d t6, a1, 48
ld.d t7, a1, 56
st.d t0, a0, 0
st.d t1, a0, 8
st.d t2, a0, 16
st.d t3, a0, 24
st.d t4, a0, 32
st.d t5, a0, 40
st.d t6, a0, 48
st.d t7, a0, 56
addi.d a0, a0, 64
addi.d a1, a1, 64
addi.d a2, a2, -64
bge a2, a4, 1b
beqz a2, 3f
/* copy the remaining bytes */
2: ld.b t0, a1, 0
st.b t0, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, 1
addi.d a2, a2, -1
bgt a2, zero, 2b
/* return */
3: move a0, a3
jr ra
SYM_FUNC_END(__memcpy_fast)
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
SYM_FUNC_START(memmove)
blt a0, a1, 1f /* dst < src, memcpy */
blt a1, a0, 3f /* src < dst, rmemcpy */
jr ra /* dst == src, return */
/* if (src - dst) < 64, copy 1 byte at a time */
1: ori a3, zero, 64
sub.d t0, a1, a0
blt t0, a3, 2f
b memcpy
2: b __memcpy_generic
/* if (dst - src) < 64, copy 1 byte at a time */
3: ori a3, zero, 64
sub.d t0, a0, a1
blt t0, a3, 4f
b rmemcpy
4: b __rmemcpy_generic
SYM_FUNC_END(memmove)
EXPORT_SYMBOL(memmove)
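The branches above encode the standard memmove rule: when the destination is below the source a plain forward copy is safe, when it is above the copy must run backwards (rmemcpy), and when the two regions overlap within a single 64-byte block the byte-wise generic variants are used instead, which keeps the unrolled loops from reading bytes they have already overwritten. A compact C rendering of the direction choice (a sketch, not this file's implementation):

#include <stddef.h>

static void *demo_memmove(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;
	size_t i;

	if (d == s || n == 0)
		return dst;
	if (d < s) {			/* forward copy: dst never catches up with src */
		for (i = 0; i < n; i++)
			d[i] = s[i];
	} else {			/* backward copy: start from the tail instead */
		for (i = n; i-- > 0; )
			d[i] = s[i];
	}
	return dst;
}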
SYM_FUNC_START(rmemcpy)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __rmemcpy_generic", \
"b __rmemcpy_fast", CPU_FEATURE_UAL
SYM_FUNC_END(rmemcpy)
/*
* void *__rmemcpy_generic(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__rmemcpy_generic)
move a3, a0
beqz a2, 2f
add.d a0, a0, a2
add.d a1, a1, a2
1: ld.b t0, a1, -1
st.b t0, a0, -1
addi.d a0, a0, -1
addi.d a1, a1, -1
addi.d a2, a2, -1
bgt a2, zero, 1b
2: move a0, a3
jr ra
SYM_FUNC_END(__rmemcpy_generic)
/*
* void *__rmemcpy_fast(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__rmemcpy_fast)
move a3, a0
beqz a2, 3f
add.d a0, a0, a2
add.d a1, a1, a2
ori a4, zero, 64
blt a2, a4, 2f
/* copy 64 bytes at a time */
1: ld.d t0, a1, -8
ld.d t1, a1, -16
ld.d t2, a1, -24
ld.d t3, a1, -32
ld.d t4, a1, -40
ld.d t5, a1, -48
ld.d t6, a1, -56
ld.d t7, a1, -64
st.d t0, a0, -8
st.d t1, a0, -16
st.d t2, a0, -24
st.d t3, a0, -32
st.d t4, a0, -40
st.d t5, a0, -48
st.d t6, a0, -56
st.d t7, a0, -64
addi.d a0, a0, -64
addi.d a1, a1, -64
addi.d a2, a2, -64
bge a2, a4, 1b
beqz a2, 3f
/* copy the remaining bytes */
2: ld.b t0, a1, -1
st.b t0, a0, -1
addi.d a0, a0, -1
addi.d a1, a1, -1
addi.d a2, a2, -1
bgt a2, zero, 2b
/* return */
3: move a0, a3
jr ra
SYM_FUNC_END(__rmemcpy_fast)
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
.macro fill_to_64 r0
bstrins.d \r0, \r0, 15, 8
bstrins.d \r0, \r0, 31, 16
bstrins.d \r0, \r0, 63, 32
.endm
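fill_to_64 broadcasts the low byte of the fill value across all 64 bits with three bstrins.d bit-field inserts; in C the same replication (assuming the value has already been reduced to one byte) looks like:

#include <stdint.h>

static uint64_t demo_fill_to_64(uint64_t c)
{
	c &= 0xff;		/* keep only the fill byte           */
	c |= c << 8;		/* pattern now fills bits 15..0      */
	c |= c << 16;		/* ... bits 31..0                    */
	c |= c << 32;		/* ... bits 63..0                    */
	return c;
}

memset can then store the replicated pattern with st.d, eight bytes per instruction, in the unrolled loop below.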
SYM_FUNC_START(memset)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __memset_generic", \
"b __memset_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memset)
EXPORT_SYMBOL(memset)
/*
* void *__memset_generic(void *s, int c, size_t n)
*
* a0: s
* a1: c
* a2: n
*/
SYM_FUNC_START(__memset_generic)
move a3, a0
beqz a2, 2f
1: st.b a1, a0, 0
addi.d a0, a0, 1
addi.d a2, a2, -1
bgt a2, zero, 1b
2: move a0, a3
jr ra
SYM_FUNC_END(__memset_generic)
/*
* void *__memset_fast(void *s, int c, size_t n)
*
* a0: s
* a1: c
* a2: n
*/
SYM_FUNC_START(__memset_fast)
move a3, a0
beqz a2, 3f
ori a4, zero, 64
blt a2, a4, 2f
/* fill a1 to 64 bits */
fill_to_64 a1
/* set 64 bytes at a time */
1: st.d a1, a0, 0
st.d a1, a0, 8
st.d a1, a0, 16
st.d a1, a0, 24
st.d a1, a0, 32
st.d a1, a0, 40
st.d a1, a0, 48
st.d a1, a0, 56
addi.d a0, a0, 64
addi.d a2, a2, -64
bge a2, a4, 1b
beqz a2, 3f
/* set the remaining bytes */
2: st.b a1, a0, 0
addi.d a0, a0, 1
addi.d a2, a2, -1
bgt a2, zero, 2b
/* return */
3: move a0, a3
jr ra
SYM_FUNC_END(__memset_fast)
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include <asm/asmmacro.h> #include <asm/asmmacro.h>
#include <asm/export.h> #include <asm/export.h>
#include <asm/regdef.h> #include <asm/regdef.h>
#include <asm/asm-extable.h>
/* /*
* long __strncpy_from_user(char *to, const char *from, long len) * long __strncpy_from_user(char *to, const char *from, long len)
...@@ -39,10 +40,7 @@ SYM_FUNC_START(__strncpy_from_user) ...@@ -39,10 +40,7 @@ SYM_FUNC_START(__strncpy_from_user)
3: li.w a0, -EFAULT 3: li.w a0, -EFAULT
jr ra jr ra
.previous .previous
_asm_extable 1b, 3b
.section __ex_table, "a"
PTR 1b, 3b
.previous
SYM_FUNC_END(__strncpy_from_user) SYM_FUNC_END(__strncpy_from_user)
EXPORT_SYMBOL(__strncpy_from_user) EXPORT_SYMBOL(__strncpy_from_user)
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <asm/asmmacro.h> #include <asm/asmmacro.h>
#include <asm/export.h> #include <asm/export.h>
#include <asm/regdef.h> #include <asm/regdef.h>
#include <asm/asm-extable.h>
/* /*
* long __strnlen_user(const char *s, long n) * long __strnlen_user(const char *s, long n)
...@@ -36,9 +37,7 @@ SYM_FUNC_START(__strnlen_user) ...@@ -36,9 +37,7 @@ SYM_FUNC_START(__strnlen_user)
jr ra jr ra
.previous .previous
.section __ex_table, "a" _asm_extable 1b, 3b
PTR 1b, 3b
.previous
SYM_FUNC_END(__strnlen_user) SYM_FUNC_END(__strnlen_user)
EXPORT_SYMBOL(__strnlen_user) EXPORT_SYMBOL(__strnlen_user)
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* /*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
* *
* Derived from MIPS: * Derived from MIPS:
* Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2007 MIPS Technologies, Inc. * Copyright (C) 2007 MIPS Technologies, Inc.
*/ */
#include <linux/export.h> #include <linux/export.h>
#include <linux/fcntl.h> #include <linux/fcntl.h>
#include <linux/fs.h> #include <linux/fs.h>
...@@ -15,127 +15,225 @@ ...@@ -15,127 +15,225 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/syscalls.h> #include <linux/syscalls.h>
#include <linux/cacheinfo.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/cpu-features.h> #include <asm/cpu-features.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/loongarch.h> #include <asm/loongarch.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/setup.h> #include <asm/setup.h>
extern struct loongson_system_configuration loongson_sysconf;
void cache_error_setup(void)
{
extern char __weak except_vec_cex;
set_merr_handler(0x0, &except_vec_cex, 0x80);
}
/* /* Cache operations. */
* LoongArch maintains ICache/DCache coherency by hardware,
* we just need "ibar" to avoid instruction hazard here.
*/
void local_flush_icache_range(unsigned long start, unsigned long end) void local_flush_icache_range(unsigned long start, unsigned long end)
{ {
asm volatile ("\tibar 0\n"::); asm volatile ("\tibar 0\n"::);
} }
EXPORT_SYMBOL(local_flush_icache_range);
void cache_error_setup(void) static inline void __flush_cache_line_hit(int leaf, unsigned long addr)
{ {
extern char __weak except_vec_cex; switch (leaf) {
set_merr_handler(0x0, &except_vec_cex, 0x80); case Cache_LEAF0:
cache_op(Hit_Writeback_Inv_LEAF0, addr);
break;
case Cache_LEAF1:
cache_op(Hit_Writeback_Inv_LEAF1, addr);
break;
case Cache_LEAF2:
cache_op(Hit_Writeback_Inv_LEAF2, addr);
break;
case Cache_LEAF3:
cache_op(Hit_Writeback_Inv_LEAF3, addr);
break;
case Cache_LEAF4:
cache_op(Hit_Writeback_Inv_LEAF4, addr);
break;
case Cache_LEAF5:
cache_op(Hit_Writeback_Inv_LEAF5, addr);
break;
default:
break;
}
} }
static unsigned long icache_size __read_mostly; static inline void __flush_cache_line_indexed(int leaf, unsigned long addr)
static unsigned long dcache_size __read_mostly; {
static unsigned long vcache_size __read_mostly; switch (leaf) {
static unsigned long scache_size __read_mostly; case Cache_LEAF0:
cache_op(Index_Writeback_Inv_LEAF0, addr);
break;
case Cache_LEAF1:
cache_op(Index_Writeback_Inv_LEAF1, addr);
break;
case Cache_LEAF2:
cache_op(Index_Writeback_Inv_LEAF2, addr);
break;
case Cache_LEAF3:
cache_op(Index_Writeback_Inv_LEAF3, addr);
break;
case Cache_LEAF4:
cache_op(Index_Writeback_Inv_LEAF4, addr);
break;
case Cache_LEAF5:
cache_op(Index_Writeback_Inv_LEAF5, addr);
break;
default:
break;
}
}
static char *way_string[] = { NULL, "direct mapped", "2-way", void flush_cache_line_hit(unsigned long addr)
"3-way", "4-way", "5-way", "6-way", "7-way", "8-way", {
"9-way", "10-way", "11-way", "12-way", int leaf;
"13-way", "14-way", "15-way", "16-way", struct cache_desc *cdesc = current_cpu_data.cache_leaves;
}; unsigned int cache_present = current_cpu_data.cache_leaves_present;
/* If last level cache is inclusive, no need to flush other caches. */
leaf = cache_present - 1;
if (cache_inclusive(cdesc + leaf)) {
__flush_cache_line_hit(leaf, addr);
return;
}
for (leaf = 0; leaf < cache_present; leaf++)
__flush_cache_line_hit(leaf, addr);
}
static void probe_pcache(void) static void flush_cache_leaf(unsigned int leaf)
{
u64 line;
int i, j, nr_nodes;
struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf;
nr_nodes = loongson_sysconf.nr_nodes;
if (cache_private(cdesc))
nr_nodes = 1;
line = CSR_DMW0_BASE;
do {
for (i = 0; i < cdesc->sets; i++) {
for (j = 0; j < cdesc->ways; j++) {
__flush_cache_line_indexed(leaf, line);
line++;
}
line -= cdesc->ways;
line += cdesc->linesz;
}
line += 0x100000000000;
} while (--nr_nodes > 0);
}
asmlinkage __visible void cpu_flush_caches(void)
{
int leaf;
struct cache_desc *cdesc = current_cpu_data.cache_leaves;
unsigned int cache_present = current_cpu_data.cache_leaves_present;
/* If last level cache is inclusive, no need to flush other caches. */
leaf = cache_present - 1;
if (cache_inclusive(cdesc + leaf)) {
flush_cache_leaf(leaf);
return;
}
for (leaf = 0; leaf < cache_present; leaf++)
flush_cache_leaf(leaf);
}
static inline void set_cache_basics(struct cache_desc *cdesc, unsigned int leaf)
{ {
struct cpuinfo_loongarch *c = &current_cpu_data;
unsigned int lsize, sets, ways;
unsigned int config; unsigned int config;
config = read_cpucfg(LOONGARCH_CPUCFG17); config = read_cpucfg(LOONGARCH_CPUCFG17 + leaf);
lsize = 1 << ((config & CPUCFG17_L1I_SIZE_M) >> CPUCFG17_L1I_SIZE); cdesc->linesz = 1 << ((config & CACHE_LSIZE_M) >> CACHE_LSIZE);
sets = 1 << ((config & CPUCFG17_L1I_SETS_M) >> CPUCFG17_L1I_SETS); cdesc->sets = 1 << ((config & CACHE_SETS_M) >> CACHE_SETS);
ways = ((config & CPUCFG17_L1I_WAYS_M) >> CPUCFG17_L1I_WAYS) + 1; cdesc->ways = ((config & CACHE_WAYS_M) >> CACHE_WAYS) + 1;
}
c->icache.linesz = lsize; #define populate_cache_properties(config, cdesc, level, leaf) \

c->icache.sets = sets; { \
c->icache.ways = ways; if (level == 1) { \
icache_size = sets * ways * lsize; cdesc->flags |= CACHE_PRIVATE; \
c->icache.waysize = icache_size / c->icache.ways; } else { \
if (config & IUPRIV) \
cdesc->flags |= CACHE_PRIVATE; \
if (config & IUINCL) \
cdesc->flags |= CACHE_INCLUSIVE; \
} \
cdesc->flags |= CACHE_PRESENT; \
cdesc->level = level; \
set_cache_basics(cdesc, leaf); \
cdesc++; \
leaf++; \
}
config = read_cpucfg(LOONGARCH_CPUCFG18); /*
lsize = 1 << ((config & CPUCFG18_L1D_SIZE_M) >> CPUCFG18_L1D_SIZE); * Each cache level occupies 7 bits of CPUCFG16, in order,
sets = 1 << ((config & CPUCFG18_L1D_SETS_M) >> CPUCFG18_L1D_SETS); * except level 1, which only uses bits 0~2.
ways = ((config & CPUCFG18_L1D_WAYS_M) >> CPUCFG18_L1D_WAYS) + 1; *
*/
static void probe_cache_hierarchy(void)
{
struct cache_desc *cdesc = current_cpu_data.cache_leaves;
unsigned int leaf = 0, level;
unsigned int config = read_cpucfg(LOONGARCH_CPUCFG16);
c->dcache.linesz = lsize; #define IUPRE (1 << 0)
c->dcache.sets = sets; #define IUUNIFY (1 << 1)
c->dcache.ways = ways; #define IUPRIV (1 << 2)
dcache_size = sets * ways * lsize; #define IUINCL (1 << 3)
c->dcache.waysize = dcache_size / c->dcache.ways; #define DPRE (1 << 4)
#define DPRIV (1 << 5)
#define DINCL (1 << 6)
c->options |= LOONGARCH_CPU_PREFETCH; #define L1DPRE (1 << 2)
pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n", for (level = 1; level <= CACHE_LEVEL_MAX; level++) {
icache_size >> 10, way_string[c->icache.ways], "VIPT", c->icache.linesz); if (config & IUPRE) {
if (config & IUUNIFY)
cdesc->type = CACHE_TYPE_UNIFIED;
else
cdesc->type = CACHE_TYPE_INST;
pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n", populate_cache_properties(config, cdesc, level, leaf);
dcache_size >> 10, way_string[c->dcache.ways], "VIPT", "no aliases", c->dcache.linesz); }
}
static void probe_vcache(void) if ((level == 1 && (config & L1DPRE)) ||
{ (level != 1 && (config & DPRE))) {
struct cpuinfo_loongarch *c = &current_cpu_data; cdesc->type = CACHE_TYPE_DATA;
unsigned int lsize, sets, ways;
unsigned int config;
config = read_cpucfg(LOONGARCH_CPUCFG19); populate_cache_properties(config, cdesc, level, leaf);
lsize = 1 << ((config & CPUCFG19_L2_SIZE_M) >> CPUCFG19_L2_SIZE); }
sets = 1 << ((config & CPUCFG19_L2_SETS_M) >> CPUCFG19_L2_SETS);
ways = ((config & CPUCFG19_L2_WAYS_M) >> CPUCFG19_L2_WAYS) + 1;
c->vcache.linesz = lsize; if (level == 1)
c->vcache.sets = sets; config = config >> 3;
c->vcache.ways = ways; else
vcache_size = lsize * sets * ways; config = config >> 7;
c->vcache.waysize = vcache_size / c->vcache.ways;
pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n", if (!config)
vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); break;
}
static void probe_scache(void) }
{
struct cpuinfo_loongarch *c = &current_cpu_data;
unsigned int lsize, sets, ways;
unsigned int config;
config = read_cpucfg(LOONGARCH_CPUCFG20); if (leaf > 0)
lsize = 1 << ((config & CPUCFG20_L3_SIZE_M) >> CPUCFG20_L3_SIZE); current_cpu_data.options |= LOONGARCH_CPU_PREFETCH;
sets = 1 << ((config & CPUCFG20_L3_SETS_M) >> CPUCFG20_L3_SETS);
ways = ((config & CPUCFG20_L3_WAYS_M) >> CPUCFG20_L3_WAYS) + 1;
c->scache.linesz = lsize; BUG_ON(leaf > CACHE_LEAVES_MAX);
c->scache.sets = sets;
c->scache.ways = ways;
/* 4 cores. scaches are shared */
scache_size = lsize * sets * ways;
c->scache.waysize = scache_size / c->scache.ways;
pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", current_cpu_data.cache_leaves_present = leaf;
scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
} }
void cpu_cache_init(void) void cpu_cache_init(void)
{ {
probe_pcache(); probe_cache_hierarchy();
probe_vcache();
probe_scache();
shm_align_mask = PAGE_SIZE - 1; shm_align_mask = PAGE_SIZE - 1;
} }
...@@ -2,21 +2,60 @@ ...@@ -2,21 +2,60 @@
/* /*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/ */
#include <linux/bitfield.h>
#include <linux/extable.h> #include <linux/extable.h>
#include <linux/spinlock.h>
#include <asm/branch.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/asm-extable.h>
#include <asm/branch.h>
static inline unsigned long
get_ex_fixup(const struct exception_table_entry *ex)
{
return ((unsigned long)&ex->fixup + ex->fixup);
}
static inline void regs_set_gpr(struct pt_regs *regs,
unsigned int offset, unsigned long val)
{
if (offset && offset <= MAX_REG_OFFSET)
*(unsigned long *)((unsigned long)regs + offset) = val;
}
static bool ex_handler_fixup(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
regs->csr_era = get_ex_fixup(ex);
return true;
}
static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
regs->csr_era = get_ex_fixup(ex);
return true;
}
int fixup_exception(struct pt_regs *regs) bool fixup_exception(struct pt_regs *regs)
{ {
const struct exception_table_entry *fixup; const struct exception_table_entry *ex;
fixup = search_exception_tables(exception_era(regs)); ex = search_exception_tables(exception_era(regs));
if (fixup) { if (!ex)
regs->csr_era = fixup->fixup; return false;
return 1; switch (ex->type) {
case EX_TYPE_FIXUP:
return ex_handler_fixup(ex, regs);
case EX_TYPE_UACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
} }
return 0; BUG();
} }
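Two details in the rewritten extable code are easy to miss: entries are now self-relative (get_ex_fixup() adds the stored offset to the address of the fixup field itself), and each entry carries a type plus packed register data, so a handler can both redirect execution and patch registers (for example writing -EFAULT into one GPR and zero into another). A simplified, illustrative model of the self-relative lookup (field names invented for the example, and the real entry also has type/data fields):

#include <stdint.h>

struct demo_extable_entry {
	int32_t insn;	/* signed offset from &insn to the faulting instruction */
	int32_t fixup;	/* signed offset from &fixup to the fixup target        */
};

static uintptr_t demo_fixup_addr(const struct demo_extable_entry *ex)
{
	/* mirrors get_ex_fixup(): relative entries keep the table small
	 * (two 32-bit words per entry) and position-independent */
	return (uintptr_t)&ex->fixup + (intptr_t)ex->fixup;
}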
...@@ -13,8 +13,8 @@ ...@@ -13,8 +13,8 @@
#include <asm/tlb.h> #include <asm/tlb.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz) unsigned long addr, unsigned long sz)
{ {
pgd_t *pgd; pgd_t *pgd;
p4d_t *p4d; p4d_t *p4d;
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/vgaarb.h> #include <linux/vgaarb.h>
#include <asm/loongson.h> #include <asm/loongson.h>
#include <asm/cacheflush.h>
#define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00 #define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00
#define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06 #define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06
...@@ -45,12 +46,10 @@ static int __init pcibios_init(void) ...@@ -45,12 +46,10 @@ static int __init pcibios_init(void)
unsigned int lsize; unsigned int lsize;
/* /*
* Set PCI cacheline size to that of the highest level in the * Set PCI cacheline size to that of the last level in the
* cache hierarchy. * cache hierarchy.
*/ */
lsize = cpu_dcache_line_size(); lsize = cpu_last_level_cache_line_size();
lsize = cpu_vcache_line_size() ? : lsize;
lsize = cpu_scache_line_size() ? : lsize;
BUG_ON(!lsize); BUG_ON(!lsize);
......
OBJECT_FILES_NON_STANDARD_suspend_asm.o := y
obj-$(CONFIG_SUSPEND) += suspend.o suspend_asm.o
obj-$(CONFIG_HIBERNATION) += cpu.o hibernate.o hibernate_asm.o
// SPDX-License-Identifier: GPL-2.0
/*
* Suspend support specific to LoongArch.
*
* Licensed under the GPLv2
* Copyright (C) 2020 Loongson Technology Co., Ltd.
*/
#include <asm/sections.h>
#include <asm/fpu.h>
static u64 saved_crmd;
static u64 saved_prmd;
static u64 saved_euen;
static u64 saved_ecfg;
struct pt_regs saved_regs;
void save_processor_state(void)
{
saved_crmd = csr_read32(LOONGARCH_CSR_CRMD);
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
saved_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
if (is_fpu_owner())
save_fp(current);
}
void restore_processor_state(void)
{
csr_write32(saved_crmd, LOONGARCH_CSR_CRMD);
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN);
csr_write32(saved_ecfg, LOONGARCH_CSR_ECFG);
if (is_fpu_owner())
restore_fp(current);
}
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
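pfn_is_nosave() tells the hibernation core which page frames to leave out of the image: PFN_DOWN() rounds the start of the __nosave section down and PFN_UP() rounds its end up, so every page that overlaps the section is excluded. A quick illustration of that rounding (4 KiB pages assumed here purely for the arithmetic; LoongArch configurations may use larger pages):

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12			/* 4 KiB pages, for illustration only */
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)
#define DEMO_PFN_DOWN(x) ((x) >> DEMO_PAGE_SHIFT)
#define DEMO_PFN_UP(x)   (((x) + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT)

int main(void)
{
	unsigned long begin = 0x1f100, end = 0x22010;	/* hypothetical __nosave bounds */

	/* pfns 0x1f .. 0x22 inclusive are excluded from the hibernation image */
	printf("nosave pfns: [%#lx, %#lx)\n", DEMO_PFN_DOWN(begin), DEMO_PFN_UP(end));
	return 0;
}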
// SPDX-License-Identifier: GPL-2.0
#include <asm/tlbflush.h>
extern int restore_image(void);
extern void enable_pcie_wakeup(void);
extern void swsusp_arch_save(void);
int swsusp_arch_suspend(void)
{
enable_pcie_wakeup();
swsusp_arch_save();
return 0;
}
int swsusp_arch_resume(void)
{
/* Avoid TLB mismatch during and after kernel resume */
local_flush_tlb_all();
return restore_image();
}
...@@ -405,8 +405,14 @@ int acpi_pci_irq_enable(struct pci_dev *dev) ...@@ -405,8 +405,14 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
* controller and must therefore be considered active high * controller and must therefore be considered active high
* as default. * as default.
*/ */
#ifdef CONFIG_LOONGARCH
int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ||
acpi_irq_model == ACPI_IRQ_MODEL_LPIC ?
ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW;
#else
int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ? int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ?
ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW; ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW;
#endif
char *link = NULL; char *link = NULL;
char link_desc[16]; char link_desc[16];
int rc; int rc;
......
...@@ -106,6 +106,7 @@ obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o ...@@ -106,6 +106,7 @@ obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o
obj-$(CONFIG_LOONGSON3_ACPI_CPUFREQ) += loongson3-acpi-cpufreq.o
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
......