未验证 提交 aa14d16c 编写于 作者: O openeuler-ci-bot 提交者: Gitee

!31 update patches for sw64 architecture

These patches generally cover the following tasks:
- optimize kernel code: remove unused code and correct references to header files
- fix perf issues: add exclude_user and exclude_kernel support, fix the number of supported raw
  events, and fix the raw event count
- add new support: ARCH_TRACEHOOK and regset, memory hotplug for guest OS, and the sw64
  interrupt controller
- modify interfaces such as show_stack(), get_wchan(), and save_stack_trace()
- modify kapi and uapi
- modify the pt_regs structure and other related code
- adapt device drivers, such as the RX580/R7-series graphics cards and the ZX200 chipset driver

Link: https://gitee.com/openeuler/kernel/pulls/31 

From: @guzitao 
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com> 
Reviewed-by: Jiao Fenfang <jiaofenfang@uniontech.com> 
Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Xie XiuQi <xiexiuqi@huawei.com> 
......@@ -7,7 +7,7 @@ config SW64
select HAVE_OPROFILE
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_GENERIC_GUP
select HAVE_FAST_GUP
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_LEGACY
......@@ -68,6 +68,7 @@ config SW64
select ARCH_HAS_SG_CHAIN
select IRQ_FORCED_THREADING
select GENERIC_IRQ_MIGRATION if SMP
select HAVE_ARCH_TRACEHOOK
select HAVE_FUNCTION_TRACER
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
......@@ -233,6 +234,7 @@ config PLATFORM_XUELANG
depends on SW64_CHIP3
select SPARSE_IRQ
select SYS_HAS_EARLY_PRINTK
select SW64_INTC_V2
help
Sunway chip3 board chipset
......@@ -649,15 +651,6 @@ config ARCH_SPARSEMEM_ENABLE
depends on SMP
select SPARSEMEM_VMEMMAP_ENABLE
config ARCH_DISCONTIGMEM_ENABLE
bool "Discontiguous Memory Support"
depends on SMP
help
Say Y to support efficient handling of discontiguous physical memory,
for architectures which are either NUMA (Non-Uniform Memory Access)
or have huge holes in the physical address space for other reasons.
See <file:Documentation/vm/numa> for more.
config NUMA
bool "NUMA Support"
depends on SMP && !FLATMEM
......@@ -744,15 +737,10 @@ endmenu
menu "Boot options"
config SW64_IRQ_CHIP
bool
config USE_OF
bool "Flattened Device Tree support"
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
select SW64_IRQ_CHIP
select OF
select IRQ_DOMAIN
help
Include support for flattened device tree machine descriptions.
......@@ -896,12 +884,4 @@ source "drivers/idle/Kconfig"
endmenu
# DUMMY_CONSOLE may be defined in drivers/video/console/Kconfig
# but we also need it if VGA_HOSE is set
config DUMMY_CONSOLE
bool
depends on VGA_HOSE
default y
source "arch/sw_64/kvm/Kconfig"
......@@ -32,12 +32,20 @@
};
intc: interrupt-controller{
intc: interrupt-controller {
compatible = "sw64,sw6_irq_controller";
interrupt-controller;
#interrupt-cells = <1>;
};
lpc_intc: interrupt-controller@0x8037 {
compatible = "sw64,lpc_intc";
reg = <0x8037 0x40000000 0x0 0x8000>;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&intc>;
interrupts = <2>;
};
uart: serial0@8033 {
#address-cells = <2>;
......@@ -176,10 +184,8 @@
lpc: lpc@0x8037 {
#address-cells = <2>;
#size-cells = <2>;
compatible = "sw,sw6b_lpc";
compatible = "sunway,chip3_lpc";
reg = <0x8037 0x40000000 0x0 0x8000>;
interrupt-parent=<&intc>;
interrupts = <2>;
status = "okay";
};
......@@ -202,6 +208,8 @@
device_type = "ipmi";
compatible = "ipmi-bt";
reg = <0x8037 0x100000e4 0x0 0x10>;
interrupt-parent=<&lpc_intc>;
interrupts = <10>;
reg-size = <1>;
reg-spacing = <1>;
reg-shift = <0>;
......
......@@ -34,5 +34,17 @@
clock-frequency = <24000000>;
status = "okay";
};
misc: misc0@8036 {
#address-cells = <2>;
#size-cells = <2>;
compatible = "sw6,sunway-ged";
reg = <0x8036 0x0 0x0 0x20>;
interrupt-parent=<&intc>;
interrupts = <13>;
reg-shift = <0>;
reg-io-width = <8>;
clock-frequency = <24000000>;
status = "okay";
};
};
};
......@@ -4,5 +4,4 @@ obj-y := chip.o i2c-lib.o
obj-$(CONFIG_PCI) += pci-quirks.o
obj-$(CONFIG_PCI_MSI) += msi.o vt_msi.o
obj-$(CONFIG_SW64_IRQ_CHIP) += irq_chip.o
obj-$(CONFIG_CPUFREQ_DEBUGFS) += cpufreq_debugfs.o
......@@ -54,7 +54,7 @@ static struct clocksource clocksource_longtime = {
static u64 read_vtime(struct clocksource *cs)
{
u64 result;
unsigned long vtime_addr = PAGE_OFFSET | IO_BASE | LONG_TIME;
unsigned long vtime_addr = IO_BASE | LONG_TIME;
result = rdio64(vtime_addr);
return result;
......@@ -90,6 +90,25 @@ void setup_chip_clocksource(void)
#endif
}
/*
 * Enable device-interrupt wakeup on the given node.
 *
 * Writes the value 0x80 into both the DEVINT_WKEN and DEVINTWK_INTEN
 * chip registers via sw64_io_write().
 * NOTE(review): 0x80 is presumably the INTD wakeup bit (per the comment
 * below) -- confirm against the chipset register manual.
 */
void set_devint_wken(int node)
{
	unsigned long val;

	/* enable INTD wakeup */
	val = 0x80;
	sw64_io_write(node, DEVINT_WKEN, val);
	sw64_io_write(node, DEVINTWK_INTEN, val);
}
/*
 * Configure PCIe port service interrupts for root complex @index on @node.
 *
 * Routes the PME and/or AER interrupt sources to INTD on core 0, but
 * only for the services actually enabled in the kernel configuration
 * (CONFIG_PCIE_PME / CONFIG_PCIEAER); IS_ENABLED() makes each write
 * compile away when the corresponding option is off.
 */
void set_pcieport_service_irq(int node, int index)
{
	if (IS_ENABLED(CONFIG_PCIE_PME))
		write_piu_ior0(node, index, PMEINTCONFIG, PME_ENABLE_INTD_CORE0);

	if (IS_ENABLED(CONFIG_PCIEAER))
		write_piu_ior0(node, index, AERERRINTCONFIG, AER_ENABLE_INTD_CORE0);
}
static int chip3_get_cpu_nums(void)
{
unsigned long trkmode;
......@@ -159,18 +178,20 @@ int chip_pcie_configure(struct pci_controller *hose)
struct pci_bus *bus, *top;
struct list_head *next;
unsigned int max_read_size, smallest_max_payload;
int max_payloadsize, iov_bus = 0;
int max_payloadsize;
unsigned long rc_index, node;
unsigned long piuconfig0, value;
unsigned int pcie_caps_offset;
unsigned int rc_conf_value;
u16 devctl, new_values;
bool rc_ari_disabled = false, found = false;
unsigned char bus_max_num;
node = hose->node;
rc_index = hose->index;
smallest_max_payload = read_rc_conf(node, rc_index, RC_EXP_DEVCAP);
smallest_max_payload &= PCI_EXP_DEVCAP_PAYLOAD;
bus_max_num = hose->busn_space->start;
top = hose->bus;
bus = top;
......@@ -181,6 +202,7 @@ int chip_pcie_configure(struct pci_controller *hose)
/* end of this bus, go up or finish */
if (bus == top)
break;
next = bus->self->bus_list.next;
bus = bus->self->bus;
continue;
......@@ -205,10 +227,8 @@ int chip_pcie_configure(struct pci_controller *hose)
}
}
#ifdef CONFIG_PCI_IOV
if (dev->is_physfn)
iov_bus += dev->sriov->max_VF_buses - dev->bus->number;
#endif
if (bus->busn_res.end > bus_max_num)
bus_max_num = bus->busn_res.end;
/* Query device PCIe capability register */
pcie_caps_offset = dev->pcie_cap;
......@@ -287,7 +307,7 @@ int chip_pcie_configure(struct pci_controller *hose)
pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, devctl);
}
return iov_bus;
return bus_max_num;
}
static int chip3_check_pci_vt_linkup(unsigned long node, unsigned long index)
......@@ -422,7 +442,7 @@ extern struct pci_controller *hose_head, **hose_tail;
static void sw6_handle_intx(unsigned int offset)
{
struct pci_controller *hose;
unsigned long value, pme_value, aer_value;
unsigned long value;
hose = hose_head;
for (hose = hose_head; hose; hose = hose->next) {
......@@ -435,15 +455,20 @@ static void sw6_handle_intx(unsigned int offset)
write_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7), value);
}
pme_value = read_piu_ior0(hose->node, hose->index, PMEINTCONFIG);
aer_value = read_piu_ior0(hose->node, hose->index, AERERRINTCONFIG);
if ((pme_value >> 63) || (aer_value >> 63)) {
handle_irq(hose->service_irq);
if (IS_ENABLED(CONFIG_PCIE_PME)) {
value = read_piu_ior0(hose->node, hose->index, PMEINTCONFIG);
if (value >> 63) {
handle_irq(hose->service_irq);
write_piu_ior0(hose->node, hose->index, PMEINTCONFIG, value);
}
}
if (pme_value >> 63)
write_piu_ior0(hose->node, hose->index, PMEINTCONFIG, pme_value);
if (aer_value >> 63)
write_piu_ior0(hose->node, hose->index, AERERRINTCONFIG, aer_value);
if (IS_ENABLED(CONFIG_PCIEAER)) {
value = read_piu_ior0(hose->node, hose->index, AERERRINTCONFIG);
if (value >> 63) {
handle_irq(hose->service_irq);
write_piu_ior0(hose->node, hose->index, AERERRINTCONFIG, value);
}
}
if (hose->iommu_enable) {
......@@ -480,8 +505,8 @@ static void chip3_hose_init(struct pci_controller *hose)
hose->dense_mem_base = pci_io_base;
hose->dense_io_base = pci_io_base | PCI_LEGACY_IO;
hose->ep_config_space_base = PAGE_OFFSET | pci_io_base | PCI_EP_CFG;
hose->rc_config_space_base = PAGE_OFFSET | pci_io_base | PCI_RC_CFG;
hose->ep_config_space_base = __va(pci_io_base | PCI_EP_CFG);
hose->rc_config_space_base = __va(pci_io_base | PCI_RC_CFG);
hose->mem_space->start = pci_io_base + PCI_32BIT_MEMIO;
hose->mem_space->end = hose->mem_space->start + PCI_32BIT_MEMIO_SIZE - 1;
......@@ -677,6 +702,11 @@ void handle_chip_irq(unsigned long type, unsigned long vector,
handle_irq(type);
set_irq_regs(old_regs);
return;
case INT_VT_HOTPLUG:
old_regs = set_irq_regs(regs);
handle_irq(type);
set_irq_regs(old_regs);
return;
case INT_PC0:
perf_irq(PERFMON_PC0, regs);
return;
......
......@@ -19,6 +19,8 @@
#include <linux/errno.h>
#include <linux/fb.h>
#include <asm/sw64io.h>
#define CPLD_BUSNR 2
#ifndef _I2C_DEBUG_FLAG_
......@@ -94,7 +96,7 @@ enum i2c_bus_operation {
I2C_BUS_WRITE,
};
static uint64_t m_i2c_base_address;
static void __iomem *m_i2c_base_address;
/*
* This function get I2Cx controller base address
......@@ -102,33 +104,28 @@ static uint64_t m_i2c_base_address;
* @param i2c_controller_index Bus Number of I2C controller.
* @return I2C BAR.
*/
uint64_t get_i2c_bar_addr(uint8_t i2c_controller_index)
void __iomem *get_i2c_bar_addr(uint8_t i2c_controller_index)
{
uint64_t base_addr = 0;
if (i2c_controller_index == 0)
base_addr = PAGE_OFFSET | IO_BASE | IIC0_BASE;
else if (i2c_controller_index == 1)
base_addr = PAGE_OFFSET | IO_BASE | IIC1_BASE;
else if (i2c_controller_index == 2)
base_addr = PAGE_OFFSET | IO_BASE | IIC2_BASE;
return base_addr;
switch (i2c_controller_index) {
case 0:
return __va(IO_BASE | IIC0_BASE);
case 1:
return __va(IO_BASE | IIC1_BASE);
case 2:
return __va(IO_BASE | IIC2_BASE);
default:
return NULL;
}
}
void write_cpu_i2c_controller(uint64_t offset, uint32_t data)
static inline void write_cpu_i2c_controller(uint64_t offset, uint32_t data)
{
mb();
*(volatile uint32_t *)(m_i2c_base_address + offset) = data;
writel(data, m_i2c_base_address + offset);
}
uint32_t read_cpu_i2c_controller(uint64_t offset)
static inline uint32_t read_cpu_i2c_controller(uint64_t offset)
{
uint32_t data;
data = *(volatile uint32_t *)(m_i2c_base_address + offset);
mb();
return data;
return readl(m_i2c_base_address + offset);
}
static int poll_for_status_set0(uint16_t status_bit)
......@@ -239,7 +236,7 @@ static int i2c_read(uint8_t reg_offset, uint8_t *buffer, uint32_t length)
write_cpu_i2c_controller(DW_IC_DATA_CMD, DW_IC_CMD);
if (poll_for_status_set0(DW_IC_STATUS_RFNE) == 0)
buffer[i] = *(uint8_t *) (m_i2c_base_address + DW_IC_DATA_CMD);
buffer[i] = readb(m_i2c_base_address + DW_IC_DATA_CMD);
else
pr_err("Read timeout line %d.\n", __LINE__);
}
......
# SPDX-License-Identifier: GPL-2.0
header-y += compiler.h
header-y += console.h
header-y += fpu.h
header-y += gentrap.h
header-y += hmcall.h
header-y += reg.h
header-y += regdef.h
header-y += sysinfo.h
header-y += page.h
header-y += elf.h
generated-y += syscall_table.h
generic-y += clkdev.h
generic-y += export.h
generic-y += kvm_types.h
generic-y += rwsem.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += param.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += mcs_spinlock.h
generic-y += clkdev.h
generic-y += scatterlist.h
generic-y += rwsem.h
generic-y += seccomp.h
generic-y += segment.h
generic-y += types.h
generic-y += user.h
generated-y += syscall_table.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_AGP_H
#define _ASM_SW64_AGP_H 1
#include <asm/io.h>
/* dummy for now */
#define map_page_into_agp(page)
#define unmap_page_from_agp(page)
#define flush_agp_cache() mb()
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif
......@@ -4,7 +4,6 @@
#include <linux/spinlock.h>
#include <asm/checksum.h>
#include <asm/console.h>
#include <asm/page.h>
#include <asm/string.h>
#include <linux/uaccess.h>
......
......@@ -5,9 +5,7 @@
#ifndef _ASM_SW64_CACHE_H
#define _ASM_SW64_CACHE_H
#define L1_CACHE_BYTES 128
#define L1_CACHE_SHIFT 7
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_COMPILER_H
#define _ASM_SW64_COMPILER_H
#include <uapi/asm/compiler.h>
#endif /* _ASM_SW64_COMPILER_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_CONSOLE_H
#define _ASM_SW64_CONSOLE_H
#include <uapi/asm/console.h>
#ifndef __ASSEMBLY__
struct crb_struct;
extern int callback_init_done;
extern void callback_init(void);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SW64_CONSOLE_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_DIV64_H
#define _ASM_SW64_DIV64_H
#include <asm-generic/div64.h>
#endif
......@@ -3,7 +3,6 @@
#define _ASM_SW64_ELF_H
#ifdef __KERNEL__
#include <asm/auxvec.h>
#include <asm/special_insns.h>
#endif
/* Special values for the st_other field in the symbol table. */
......@@ -56,23 +55,18 @@
#define EF_SW64_32BIT 1 /* All addresses are below 2GB */
/*
* ELF register definitions..
*/
/*
* The legacy version of <sys/procfs.h> makes gregset_t 46 entries long.
* I have no idea why that is so. For now, we just leave it at 33
* (32 general regs + processor status word).
* ELF register definitions.
*
* For now, we just leave it at 33 (32 general regs + processor status word).
*/
#define ELF_NGREG 33
#define ELF_NFPREG 32
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/* Same with user_fpsimd_state */
#include <uapi/asm/ptrace.h>
typedef struct user_fpsimd_state elf_fpregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
......@@ -122,30 +116,16 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#ifdef __KERNEL__
struct pt_regs;
struct thread_info;
struct task_struct;
extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt,
struct thread_info *ti);
#define ELF_CORE_COPY_REGS(DEST, REGS) \
dump_elf_thread(DEST, REGS, current_thread_info());
/* Similar, but for a thread other than current. */
extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
#define ELF_CORE_COPY_TASK_REGS(TASK, DEST) dump_elf_task(*(DEST), TASK)
/* Similar, but for the FP registers. */
extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task);
#define ELF_CORE_COPY_FPREGS(TASK, DEST) dump_elf_task_fp(*(DEST), TASK)
extern void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *pt);
#define ELF_CORE_COPY_REGS(DEST, REGS) sw64_elf_core_copy_regs(DEST, REGS);
/*
* This yields a mask that user programs can use to figure out what
* instruction set this CPU supports. This is trivial on SW-64,
* but not so on other machines.
* instruction set this CPU supports.
*/
#define ELF_HWCAP (~amask(-1))
#define ELF_HWCAP 0
/*
* This yields a string that ld.so will use to load implementation
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_EMERGENCY_RESTART_H
#define _ASM_SW64_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* _ASM_SW64_EMERGENCY_RESTART_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_EXEC_H
#define _ASM_SW64_EXEC_H
#define arch_align_stack(x) (x)
#endif /* _ASM_SW64_EXEC_H */
......@@ -52,4 +52,8 @@ struct exception_table_entry {
(b)->fixup.unit = (tmp).fixup.unit; \
} while (0)
/* Macro for exception fixup code to access integer registers. */
extern short regoffsets[];
#define map_regs(r) (*(unsigned long *)((char *)regs + regoffsets[r]))
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Architecture specific parts of the Floppy driver
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995
*/
#ifndef _ASM_SW64_FLOPPY_H
#define _ASM_SW64_FLOPPY_H
#define fd_inb(port) inb_p(port)
#define fd_outb(value, port) outb_p(value, port)
#define fd_enable_dma() enable_dma(FLOPPY_DMA)
#define fd_disable_dma() disable_dma(FLOPPY_DMA)
#define fd_request_dma() request_dma(FLOPPY_DMA, "floppy")
#define fd_free_dma() free_dma(FLOPPY_DMA)
#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA, mode)
#define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA, virt_to_bus(addr))
#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA, count)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_cacheflush(addr, size) /* nothing */
#define fd_request_irq() \
request_irq(FLOPPY_IRQ, floppy_interrupt, 0, "floppy", NULL)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
#ifdef CONFIG_PCI
#include <linux/pci.h>
#define fd_dma_setup(addr, size, mode, io) \
sw64_fd_dma_setup(addr, size, mode, io)
/*
 * Arch hook used by the floppy driver to program a DMA transfer.
 *
 * Caches the previous mapping in function-local statics so that an
 * identical back-to-back request (same address, size and direction)
 * reuses the previously computed bus address instead of remapping.
 * Always returns 0 (success is the only outcome here).
 */
static inline int
sw64_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
{
	static unsigned long prev_size;
	static dma_addr_t bus_addr;
	static char *prev_addr;
	static int prev_dir;
	int dir;

	dir = (mode != DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;

	if (bus_addr
	    && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
		/* different from last time -- unmap prev */
		bus_addr = 0;
	}

	if (!bus_addr) /* need to map it */
		bus_addr = virt_to_bus(addr);

	/* remember this one as prev */
	prev_addr = addr;
	prev_size = size;
	prev_dir = dir;

	/* program the DMA engine for this transfer */
	fd_clear_dma_ff();
	fd_cacheflush(addr, size);
	fd_set_dma_mode(mode);
	set_dma_addr(FLOPPY_DMA, bus_addr);
	fd_set_dma_count(size);
	virtual_dma_port = io;
	fd_enable_dma();
	return 0;
}
#endif /* CONFIG_PCI */
/*
 * No-op: sw64 needs no virtual-DMA initialisation.
 *
 * NOTE(review): declared plain `inline` rather than `static inline`;
 * in a header, C99 `inline` without an out-of-line definition is a
 * linkage hazard -- confirm an external definition exists, or consider
 * making this `static inline`.
 */
inline void virtual_dma_init(void)
{
	/* Nothing to do on an sw64 */
}
static int FDC1 = 0x3f0;
static int FDC2 = -1;
/*
* Again, the CMOS information doesn't work on the sw64..
*/
#define FLOPPY0_TYPE 6
#define FLOPPY1_TYPE 0
#define N_FDC 2
#define N_DRIVE 8
/*
* Most sw64s have no problems with floppy DMA crossing 64k borders,
* except for certain ones, like XL and RUFFIAN.
*
* However, the test is simple and fast, and this *is* floppy, after all,
* so we do it for all platforms, just to make sure.
*
* This is advantageous in other circumstances as well, as in moving
* about the PCI DMA windows and forcing the floppy to start doing
* scatter-gather when it never had before, and there *is* a problem
* on that platform... ;-}
*/
/*
 * Return non-zero when the buffer starting at @a spanning @s bytes
 * crosses a 64KB boundary, zero otherwise: the first and last byte
 * addresses differ above bit 15 exactly when a 64KB border lies
 * inside the span.
 */
static inline unsigned long CROSS_64KB(void *a, unsigned long s)
{
	unsigned long first = (unsigned long)a;
	unsigned long last = first + s - 1;

	return (first ^ last) & ~0xffffUL;
}
#define EXTRA_FLOPPY_PARAMS
#endif /* __ASM_SW64_FLOPPY_H */
......@@ -18,6 +18,7 @@ enum HCALL_TYPE {
HCALL_SWNET = 20, /* guest request swnet service */
HCALL_SWNET_IRQ = 21, /* guest request swnet intr */
HCALL_FATAL_ERROR = 22, /* guest fatal error, issued by hmcode */
HCALL_MEMHOTPLUG = 23, /* guest memory hotplug event */
NR_HCALL
};
......
......@@ -64,15 +64,6 @@ static inline void * __deprecated bus_to_virt(unsigned long address)
}
#define isa_bus_to_virt bus_to_virt
/*
* There are different chipsets to interface the sw64 CPUs to the world.
*/
#define IO_CONCAT(a, b) _IO_CONCAT(a, b)
#define _IO_CONCAT(a, b) a ## _ ## b
#include <asm/sw64io.h>
/*
* Generic IO read/write. These perform native-endian accesses.
*/
......@@ -184,14 +175,6 @@ extern void outb(u8 b, unsigned long port);
extern void outw(u16 b, unsigned long port);
extern void outl(u32 b, unsigned long port);
/*
* Mapping from port numbers to __iomem space is pretty easy.
*/
static inline void __iomem *ioportmap(unsigned long addr)
{
return sw64_platform->ioportmap(addr);
}
static inline void __iomem *__ioremap(phys_addr_t addr, size_t size,
pgprot_t prot)
{
......@@ -211,22 +194,6 @@ static inline void __iounmap(volatile void __iomem *addr)
#define iounmap __iounmap
static inline int __is_ioaddr(unsigned long addr)
{
return addr >= (PAGE_OFFSET | IO_BASE);
}
#define __is_ioaddr(a) __is_ioaddr((unsigned long)(a))
static inline int __is_mmio(const volatile void __iomem *xaddr)
{
unsigned long addr = (unsigned long)xaddr;
return (addr & 0x100000000UL) == 0;
}
#define ioread16be(p) be16_to_cpu(ioread16(p))
#define ioread32be(p) be32_to_cpu(ioread32(p))
#define iowrite16be(v, p) iowrite16(cpu_to_be16(v), (p))
......
......@@ -11,6 +11,8 @@
#include <linux/irq.h>
#include <linux/profile.h>
#include <asm/sw64io.h>
#define SW64_PCIE0_INT_BASE 17
#define SW64_PCIE0_MSI_BASE 21
......@@ -30,6 +32,7 @@ enum sw64_irq_type {
INT_RTC = 9,
INT_FAULT = 10,
INT_VT_SERIAL = 12,
INT_VT_HOTPLUG = 13,
INT_DEV = 17,
INT_NMI = 18,
INT_LEGACY = 31,
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_IRQ_REGS_H
#define _ASM_SW64_IRQ_REGS_H
#include <asm-generic/irq_regs.h>
#endif
......@@ -5,14 +5,6 @@
#include <asm/hmcall.h>
#define IPL_MIN 0
#define IPL_SW0 1
#define IPL_SW1 2
#define IPL_DEV0 3
#define IPL_DEV1 4
#define IPL_TIMER 5
#define IPL_PERF 6
#define IPL_POWERFAIL 6
#define IPL_MCHECK 7
#define IPL_MAX 7
#define getipl() (rdps() & 7)
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_KMAP_TYPES_H
#define _ASM_SW64_KMAP_TYPES_H
/* Dummy header just to define km_type. */
#ifdef CONFIG_DEBUG_HIGHMEM
#define __WITH_KM_FENCE
#endif
#include <asm-generic/kmap_types.h>
#undef __WITH_KM_FENCE
#endif
......@@ -11,4 +11,7 @@
#define SW64_KVM_EXIT_RESTART 17
#define SW64_KVM_EXIT_FATAL_ERROR 22
#ifdef CONFIG_KVM_MEMHOTPLUG
#define SW64_KVM_EXIT_MEMHOTPLUG 23
#endif
#endif /* _ASM_SW64_KVM_ASM_H */
......@@ -29,7 +29,7 @@
#include <asm/kvm_mmio.h>
#define KVM_MAX_VCPUS 64
#define KVM_USER_MEM_SLOTS 512
#define KVM_USER_MEM_SLOTS 64
#define KVM_HALT_POLL_NS_DEFAULT 0
#define KVM_IRQCHIP_NUM_PINS 256
......@@ -42,12 +42,16 @@
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
struct kvm_arch_memory_slot {
unsigned long host_phys_addr;
bool valid;
};
struct kvm_arch {
unsigned long host_phys_addr;
unsigned long size;
/* segment table */
unsigned long *seg_pgd;
};
......@@ -100,6 +104,9 @@ struct kvm_vcpu_stat {
u64 halt_poll_invalid;
};
#ifdef CONFIG_KVM_MEMHOTPLUG
void vcpu_mem_hotplug(struct kvm_vcpu *vcpu, unsigned long start_addr);
#endif
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index, struct hcall_args *hargs);
void vcpu_send_ipi(struct kvm_vcpu *vcpu, int target_vcpuid);
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_LOCAL_H
#define _ASM_SW64_LOCAL_H
#include <linux/percpu.h>
#include <linux/atomic.h>
typedef struct {
atomic_long_t a;
} local_t;
#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
#define local_read(l) atomic_long_read(&(l)->a)
#define local_set(l, i) atomic_long_set(&(l)->a, (i))
#define local_inc(l) atomic_long_inc(&(l)->a)
#define local_dec(l) atomic_long_dec(&(l)->a)
#define local_add(i, l) atomic_long_add((i), (&(l)->a))
#define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
/*
 * Atomically add @i to the local counter @l and return the new value.
 *
 * Implemented with the sw64 load-locked/store-conditional pair
 * (lldl/lstl) together with the wr_f/rd_f lock-flag protocol; if the
 * conditional store fails, the out-of-line stub at label 2 branches
 * back to label 1 to retry.  The optional memb barriers are build-time
 * workarounds selected by CONFIG_LOCK_MEMB / CONFIG_LOCK_FIXUP.
 */
static inline long local_add_return(long i, local_t *l)
{
	long temp1, temp2, result, addr;

	__asm__ __volatile__(
#ifdef CONFIG_LOCK_MEMB
	" memb\n"
#endif
	" ldi %4, %2\n"
	"1: lldl %0, 0(%4)\n"
	" ldi %1, 1\n"
	" wr_f %1\n"
	" addl %0, %5, %3\n"	/* %3 (result) = old value + i */
	" addl %0, %5, %0\n"	/* %0 = new value for the store */
#ifdef CONFIG_LOCK_FIXUP
	" memb\n"
#endif
	" lstl %0, 0(%4)\n"
	" rd_f %0\n"
	" beq %0, 2f\n"		/* conditional store failed -> retry */
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	: "=&r" (temp1), "=&r" (temp2), "=m" (l->a.counter),
	  "=&r" (result), "=&r" (addr)
	: "Ir" (i), "m" (l->a.counter) : "memory");
	return result;
}
/*
 * Atomically subtract @i from the local counter @l and return the new
 * value.  Mirror image of local_add_return(): same lldl/lstl retry
 * loop and wr_f/rd_f lock-flag protocol, with subl in place of addl.
 */
static inline long local_sub_return(long i, local_t *l)
{
	long temp1, temp2, result, addr;

	__asm__ __volatile__(
#ifdef CONFIG_LOCK_MEMB
	" memb\n"
#endif
	" ldi %4, %2\n"
	"1: lldl %0, 0(%4)\n"
	" ldi %1, 1\n"
	" wr_f %1\n"
	" subl %0, %5, %3\n"	/* %3 (result) = old value - i */
	" subl %0, %5, %0\n"	/* %0 = new value for the store */
#ifdef CONFIG_LOCK_FIXUP
	" memb\n"
#endif
	" lstl %0, 0(%4)\n"
	" rd_f %0\n"
	" beq %0, 2f\n"		/* conditional store failed -> retry */
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	: "=&r" (temp1), "=&r" (temp2), "=m" (l->a.counter),
	  "=&r" (result), "=&r" (addr)
	: "Ir" (i), "m" (l->a.counter) : "memory");
	return result;
}
#define local_cmpxchg(l, o, n) \
(cmpxchg_local(&((l)->a.counter), (o), (n)))
#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
/**
* local_add_unless - add unless the number is a given value
* @l: pointer of type local_t
* @a: the amount to add to l...
* @u: ...unless l is equal to u.
*
* Atomically adds @a to @l, so long as it was not @u.
* Returns non-zero if @l was not @u, and zero otherwise.
*/
#define local_add_unless(l, a, u) \
({ \
long c, old; \
c = local_read(l); \
for (;;) { \
if (unlikely(c == (u))) \
break; \
old = local_cmpxchg((l), c, c + (a)); \
if (likely(old == c)) \
break; \
c = old; \
} \
c != (u); \
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
#define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
#define local_dec_return(l) local_sub_return(1, (l))
#define local_inc_return(l) local_add_return(1, (l))
#define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0)
#define local_inc_and_test(l) (local_add_return(1, (l)) == 0)
#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
/* Verify if faster than atomic ops */
#define __local_inc(l) ((l)->a.counter++)
#define __local_dec(l) ((l)->a.counter++)
#define __local_add(i, l) ((l)->a.counter += (i))
#define __local_sub(i, l) ((l)->a.counter -= (i))
#endif /* _ASM_SW64_LOCAL_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_LOCAL64_H
#define _ASM_SW64_LOCAL64_H
#include <asm-generic/local64.h>
#endif
......@@ -6,6 +6,7 @@
#include <linux/numa.h>
#endif
#define MIN_MEMORY_BLOCK_SIZE_VM_MEMHP (1UL << 30)
#define NODE0_START (_TEXT_START - __START_KERNEL_map)
#define MAX_PHYSMEM_BITS 48
......
......@@ -131,7 +131,7 @@ switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
* Always update the PCB PTBR. If next is kernel thread, it must
* update PTBR. If next is user process, it's ok to update PTBR.
*/
task_thread_info(next)->pcb.ptbr = (__pa(next_mm->pgd)) >> PAGE_SHIFT;
task_thread_info(next)->pcb.ptbr = virt_to_pfn(next_mm->pgd);
load_asn_ptbr(task_thread_info(next)->pcb.asn, task_thread_info(next)->pcb.ptbr);
}
......@@ -170,8 +170,7 @@ static inline int init_new_context(struct task_struct *tsk,
for_each_possible_cpu(i)
mm->context.asid[i] = 0;
if (tsk != current)
task_thread_info(tsk)->pcb.ptbr
= (__pa(mm->pgd)) >> PAGE_SHIFT;
task_thread_info(tsk)->pcb.ptbr = virt_to_pfn(mm->pgd);
return 0;
}
......@@ -183,8 +182,7 @@ static inline void destroy_context(struct mm_struct *mm)
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
task_thread_info(tsk)->pcb.ptbr
= (__pa(mm->pgd)) >> PAGE_SHIFT;
task_thread_info(tsk)->pcb.ptbr = virt_to_pfn(mm->pgd);
}
static inline int arch_dup_mmap(struct mm_struct *oldmm,
......
......@@ -14,34 +14,4 @@ extern pg_data_t *node_data[];
#define NODE_DATA(nid) (node_data[(nid)])
#endif
#ifdef CONFIG_DISCONTIGMEM
extern int pa_to_nid(unsigned long pa);
extern int pfn_valid(unsigned long pfn);
#define mk_pte(page, pgprot) \
({ \
pte_t pte; \
unsigned long pfn; \
\
pfn = page_to_pfn(page) << _PTE_FLAGS_BITS; \
pte_val(pte) = pfn | pgprot_val(pgprot); \
\
pte; \
})
#define pte_page(x) \
({ \
unsigned long kvirt; \
struct page *__xx; \
\
kvirt = (unsigned long)__va(pte_val(x) >> (_PTE_FLAGS_BITS-PAGE_SHIFT));\
__xx = virt_to_page(kvirt); \
\
__xx; \
})
#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT)
#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
#endif /* CONFIG_DISCONTIGMEM */
#endif /* _ASM_SW64_MMZONE_H */
......@@ -2,18 +2,12 @@
#ifndef _ASM_SW64_MODULE_H
#define _ASM_SW64_MODULE_H
#include <asm-generic/module.h>
struct mod_arch_specific {
unsigned int gotsecindex;
};
#define Elf_Sym Elf64_Sym
#define Elf_Shdr Elf64_Shdr
#define Elf_Ehdr Elf64_Ehdr
#define Elf_Phdr Elf64_Phdr
#define Elf_Dyn Elf64_Dyn
#define Elf_Rel Elf64_Rel
#define Elf_Rela Elf64_Rela
#define ARCH_SHF_SMALL SHF_SW64_GPREL
#ifdef MODULE
......
......@@ -46,10 +46,13 @@ extern unsigned long __phys_addr(unsigned long);
#endif
#define __pa(x) __phys_addr((unsigned long)(x))
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#define __va(x) ((void *)((unsigned long) (x) | PAGE_OFFSET))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_pfn(vaddr) (PHYS_PFN(__pa(vaddr)))
#define pfn_to_virt(pfn) (__va(PFN_PHYS(pfn)))
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_PARAM_H
#define _ASM_SW64_PARAM_H
#include <uapi/asm/param.h>
#undef HZ
#define HZ CONFIG_HZ
#define USER_HZ 100
#define CLOCKS_PER_SEC USER_HZ /* frequency at which times() counts */
#endif /* _ASM_SW64_PARAM_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* parport.h: platform-specific PC-style parport initialisation
*
* Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
*
* This file should only be included by drivers/parport/parport_pc.c.
*/
#ifndef _ASM_SW64_PARPORT_H
#define _ASM_SW64_PARPORT_H
static int parport_pc_find_isa_ports(int autoirq, int autodma);
static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
{
return parport_pc_find_isa_ports(autoirq, autodma);
}
#endif /* !(_ASM_SW64_PARPORT_H) */
......@@ -34,8 +34,8 @@ struct pci_controller {
unsigned long dense_io_base;
/* This one's for the kernel only. It's in KSEG somewhere. */
unsigned long ep_config_space_base;
unsigned long rc_config_space_base;
void __iomem *ep_config_space_base;
void __iomem *rc_config_space_base;
unsigned long index;
unsigned long node;
......
......@@ -15,7 +15,7 @@
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
pmd_set(pmd, (pte_t *)__va(page_to_pa(pte)));
}
#define pmd_pgtable(pmd) pmd_page(pmd)
......
......@@ -119,9 +119,8 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)
#define _PFN_MASK 0xFFFFFFFFF0000000UL
#define _PFN_BITS 36
#define _PTE_FLAGS_BITS (64 - _PFN_BITS)
#define _PFN_SHIFT 28
#define _PFN_MASK ((-1UL) << _PFN_SHIFT)
#define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS | _PAGE_SPECIAL)
......@@ -181,53 +180,19 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8 * sizeof(unsigned long))
/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void *) - 1))
/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2 3
/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address) >> (PAGE_SHIFT - SIZEOF_PTR_LOG2) & PTR_MASK & ~PAGE_MASK)
#define PHYS_TWIDDLE(pfn) (pfn)
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT)
#define pmd_pfn(pmd) (pmd_val(pmd) >> _PTE_FLAGS_BITS)
#define pte_pfn(pte) (pte_val(pte) >> _PTE_FLAGS_BITS)
#ifndef CONFIG_DISCONTIGMEM
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot) \
({ \
pte_t pte; \
\
pte_val(pte) = (page_to_pfn(page) << _PTE_FLAGS_BITS) | pgprot_val(pgprot); \
pte; \
})
#endif
static inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
pte_t pte;
pte_val(pte) = (PHYS_TWIDDLE(physpfn) << _PTE_FLAGS_BITS) | pgprot_val(pgprot);
pte_val(pte) = (pfn << _PFN_SHIFT) | pgprot_val(prot);
return pte;
}
static inline pmd_t pfn_pmd(unsigned long physpfn, pgprot_t pgprot)
static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
pmd_t pmd;
pmd_val(pmd) = (PHYS_TWIDDLE(physpfn) << _PTE_FLAGS_BITS) | pgprot_val(pgprot);
pmd_val(pmd) = (pfn << _PFN_SHIFT) | pgprot_val(prot);
return pmd;
}
......@@ -245,37 +210,48 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
pmd_val(*pmdp) = _PAGE_TABLE | (__pa(ptep) << (_PTE_FLAGS_BITS - PAGE_SHIFT));
pmd_val(*pmdp) = _PAGE_TABLE | (virt_to_pfn(ptep) << _PFN_SHIFT);
}
static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
{
pud_val(*pudp) = _PAGE_TABLE | (__pa(pmdp) << (_PTE_FLAGS_BITS - PAGE_SHIFT));
pud_val(*pudp) = _PAGE_TABLE | (virt_to_pfn(pmdp) << _PFN_SHIFT);
}
static inline void p4d_set(p4d_t *p4dp, pud_t *pudp)
{
p4d_val(*p4dp) = _PAGE_TABLE | (__pa(pudp) << (_PTE_FLAGS_BITS - PAGE_SHIFT));
p4d_val(*p4dp) = _PAGE_TABLE | (virt_to_pfn(pudp) << _PFN_SHIFT);
}
static inline unsigned long
pmd_page_vaddr(pmd_t pmd)
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
return ((pmd_val(pmd) & _PFN_MASK) >> (_PTE_FLAGS_BITS-PAGE_SHIFT)) + PAGE_OFFSET;
return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PFN_SHIFT);
}
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> _PTE_FLAGS_BITS))
#define pud_page(pud) (pfn_to_page(pud_val(pud) >> _PTE_FLAGS_BITS))
#define p4d_page(p4d) (pfn_to_page(p4d_val(p4d) >> _PTE_FLAGS_BITS))
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT)
#define pmd_pfn(pmd) (pmd_val(pmd) >> _PFN_SHIFT)
#define pte_pfn(pte) (pte_val(pte) >> _PFN_SHIFT)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> _PFN_SHIFT))
#define pud_page(pud) (pfn_to_page(pud_val(pud) >> _PFN_SHIFT))
#define p4d_page(p4d) (pfn_to_page(p4d_val(p4d) >> _PFN_SHIFT))
static inline pud_t *p4d_pgtable(p4d_t p4d)
{
return (pud_t *)(PAGE_OFFSET + ((p4d_val(p4d) & _PFN_MASK) >> (_PTE_FLAGS_BITS-PAGE_SHIFT)));
return (pud_t *)pfn_to_virt(p4d_val(p4d) >> _PFN_SHIFT);
}
static inline pmd_t *pud_pgtable(pud_t pud)
{
return (pmd_t *)(PAGE_OFFSET + ((pud_val(pud) & _PFN_MASK) >> (_PTE_FLAGS_BITS-PAGE_SHIFT)));
return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PFN_SHIFT);
}
static inline int pte_none(pte_t pte)
......@@ -566,7 +542,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
set_bit(_PAGE_BIT_FOW, (unsigned long *)pmdp);
}
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), (prot))
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
......@@ -586,15 +562,6 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp);
#define PAGE_DIR_OFFSET(tsk, address) pgd_offset((tsk), (address))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
/* to find an entry in a page-table-directory. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
extern pgd_t swapper_pg_dir[1024];
/*
......@@ -629,14 +596,7 @@ extern pgd_t swapper_pg_dir[1024];
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#if defined(CONFIG_FLATMEM)
#define kern_addr_valid(addr) (1)
#elif defined(CONFIG_DISCONTIGMEM)
/* XXX: FIXME -- wli */
#define kern_addr_valid(kaddr) (0)
#elif defined(CONFIG_SPARSEMEM)
#define kern_addr_valid(addr) (1)
#endif
#define pte_ERROR(e) \
pr_err("%s: %d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_PREEMPT_H
#define _ASM_SW64_PREEMPT_H

/* sw64 has no arch-specific preempt-count handling; use the generic one. */
#include <asm-generic/preempt.h>

#endif /* _ASM_SW64_PREEMPT_H */
......@@ -9,6 +9,10 @@
#define _ASM_SW64_PROCESSOR_H
#include <linux/personality.h> /* for ADDR_LIMIT_32BIT */
#include <asm/ptrace.h>
#define task_pt_regs(task) \
((struct pt_regs *) (task_stack_page(task) + 2 * PAGE_SIZE) - 1)
/*
* Returns current instruction pointer ("program counter").
......@@ -37,47 +41,12 @@
#define TASK_UNMAPPED_BASE \
((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : UNMAPPED_BASE)
typedef struct {
unsigned long seg;
} mm_segment_t;
struct context_fpregs {
unsigned long f0[4];
unsigned long f1[4];
unsigned long f2[4];
unsigned long f3[4];
unsigned long f4[4];
unsigned long f5[4];
unsigned long f6[4];
unsigned long f7[4];
unsigned long f8[4];
unsigned long f9[4];
unsigned long f10[4];
unsigned long f11[4];
unsigned long f12[4];
unsigned long f13[4];
unsigned long f14[4];
unsigned long f15[4];
unsigned long f16[4];
unsigned long f17[4];
unsigned long f18[4];
unsigned long f19[4];
unsigned long f20[4];
unsigned long f21[4];
unsigned long f22[4];
unsigned long f23[4];
unsigned long f24[4];
unsigned long f25[4];
unsigned long f26[4];
unsigned long f27[4];
unsigned long f28[4];
unsigned long f29[4];
unsigned long f30[4];
} __aligned(32); /* 256 bits aligned for simd */
struct thread_struct {
struct context_fpregs ctx_fp;
unsigned long fpcr;
struct user_fpsimd_state fpstate;
/* Callee-saved registers */
unsigned long ra;
unsigned long sp;
unsigned long s[7]; /* s0 ~ s6 */
};
#define INIT_THREAD { }
......
......@@ -3,12 +3,57 @@
#define _ASM_SW64_PTRACE_H
#include <uapi/asm/ptrace.h>
#include <linux/sched/task_stack.h>
#include <asm/hmcall.h>
#include <asm/thread_info.h>
#include <asm/processor.h>
#include <asm/page.h>
/*
 * This struct defines the way the registers are stored on the
 * kernel stack during a system call or other kernel entry.
 * The layout must stay in sync with the PT_REGS_* offsets emitted
 * by asm-offsets, which the assembly entry code indexes directly.
 */
struct pt_regs {
	/* General-purpose registers saved by the kernel entry path. */
	unsigned long r0;
	unsigned long r1;
	unsigned long r2;
	unsigned long r3;
	unsigned long r4;
	unsigned long r5;
	unsigned long r6;
	unsigned long r7;
	unsigned long r8;
	unsigned long r9;
	unsigned long r10;
	unsigned long r11;
	unsigned long r12;
	unsigned long r13;
	unsigned long r14;
	unsigned long r15;
	/* r16 ~ r18 saved by hmcode */
	unsigned long r19;
	unsigned long r20;
	unsigned long r21;
	unsigned long r22;
	unsigned long r23;
	unsigned long r24;
	unsigned long r25;
	unsigned long r26;
	unsigned long r27;
	unsigned long r28;
	unsigned long hae;
	/* JRP - These are the values provided to a0-a2 by HMcode */
	unsigned long trap_a0;
	unsigned long trap_a1;
	unsigned long trap_a2;
	/* These are saved by HMcode: */
	unsigned long ps;
	unsigned long pc;
	unsigned long gp;
	/* Argument registers r16-r18, stored last in the frame. */
	unsigned long r16;
	unsigned long r17;
	unsigned long r18;
};
#define arch_has_single_step() (1)
#define user_mode(regs) (((regs)->ps & 8) != 0)
#define instruction_pointer(regs) ((regs)->pc)
......@@ -18,8 +63,6 @@
#define kernel_stack_pointer(regs) (((regs->ps) >> 4) & (TASK_SIZE - 1))
#define instruction_pointer_set(regs, val) ((regs)->pc = val)
#define task_pt_regs(task) \
((struct pt_regs *) (task_stack_page(task) + 2 * PAGE_SIZE) - 1)
#define current_pt_regs() \
((struct pt_regs *) ((char *)current_thread_info() + 2 * PAGE_SIZE) - 1)
......@@ -28,6 +71,9 @@
#define force_successful_syscall_return() (current_pt_regs()->r0 = 0)
#define MAX_REG_OFFSET (offsetof(struct pt_regs, r18))
extern short regoffsets[];
/**
* regs_get_register() - get register value from its offset
* @regs: pt_regs from which register value is gotten
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/sw_64/include/asm/seccomp.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASM_SW64_SECCOMP_H
#define _ASM_SW64_SECCOMP_H

#include <asm/unistd.h>		/* syscall numbers the generic filter needs */
#include <asm-generic/seccomp.h>

#endif /* _ASM_SW64_SECCOMP_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_SECTIONS_H
#define _ASM_SW64_SECTIONS_H

/* nothing to see, move along — the generic section symbols suffice */
#include <asm-generic/sections.h>

#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_SEGMENT_H
#define _ASM_SW64_SEGMENT_H

/*
 * Intentionally empty: only here because some old header files
 * expect <asm/segment.h> to exist.
 */

#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_SERIAL_H
#define _ASM_SW64_SERIAL_H

/* Standard 1.8432 MHz UART input clock divided by 16 (115200 max baud). */
#define BASE_BAUD (1843200 / 16)

/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ)
#define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ)
#else
#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
#define STD_COM4_FLAGS UPF_BOOT_AUTOCONF
#endif

#endif /* _ASM_SW64_SERIAL_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_SHMPARAM_H
#define _ASM_SW64_SHMPARAM_H

#define SHMLBA PAGE_SIZE	/* attach addr a multiple of this */

#endif /* _ASM_SW64_SHMPARAM_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_SPECIAL_INSNS_H
#define _ASM_SW64_SPECIAL_INSNS_H

/* Feature bits tested via amask() (Alpha-style architecture mask). */
enum amask_enum {
	AMASK_BWX = (1UL << 0),
	AMASK_FIX = (1UL << 1),
	AMASK_CIX = (1UL << 2),
	AMASK_MAX = (1UL << 8),
	AMASK_PRECISE_TRAP = (1UL << 9),
};

/*
 * amask(mask): query CPU feature bits.
 * NOTE(review): the asm body is a plain register move, so the input
 * mask is returned unchanged — presumably a placeholder until a real
 * sw64 amask instruction is emitted; confirm intended semantics.
 */
#define amask(mask)							\
({									\
	unsigned long __amask, __input = (mask);			\
	__asm__ ("mov %1, %0" : "=r"(__amask) : "rI"(__input));		\
	__amask;							\
})

#endif /* _ASM_SW64_SPECIAL_INSNS_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_STACKTRACE_H
#define _ASM_SW64_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/memory.h>
#include <asm/ptrace.h>

/* One unwound frame: saved program counter and frame pointer. */
struct stackframe {
	unsigned long pc;
	unsigned long fp;
};

/* Classification of the stack a given SP belongs to. */
enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
};

/* Bounds and type of the stack containing a given SP. */
struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

/* The form of the top of the frame on the stack */
struct stack_frame {
	unsigned long return_address;
	struct stack_frame *next_frame;
};

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs,
			    int (*fn)(unsigned long, void *), void *data);

/*
 * Check whether @sp lies inside @tsk's kernel stack; on success,
 * optionally fill @info with the bounds and STACK_TYPE_TASK.
 */
static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_TASK;
	}

	return true;
}

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(struct task_struct *tsk,
				       unsigned long sp,
				       struct stack_info *info)
{
	if (on_task_stack(tsk, sp, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	/*
	 * NOTE(review): both remaining paths return false; checks for
	 * per-cpu (e.g. IRQ) stacks would slot in here if/when such
	 * stacks are classified on sw64.
	 */
	return false;
}

#endif /* _ASM_SW64_STACKTRACE_H */
......@@ -5,6 +5,7 @@
#include <linux/cpu.h>
#include <linux/pci.h>
#include <asm/sw64io.h>
struct sw64_early_init_ops {
void (*setup_core_start)(struct cpumask *cpumask);
......
......@@ -2,6 +2,7 @@
#ifndef _ASM_SW64_SW64IO_H
#define _ASM_SW64_SW64IO_H
#include <asm/io.h>
#include <asm/page.h>
extern void setup_chip_clocksource(void);
......@@ -11,105 +12,87 @@ extern void setup_chip_clocksource(void);
#endif
#define MK_RC_CFG(nid, idx) \
(PAGE_OFFSET | SW64_PCI_IO_BASE((nid), (idx)) | PCI_RC_CFG)
(SW64_PCI_IO_BASE((nid), (idx)) | PCI_RC_CFG)
#define MK_PIU_IOR0(nid, idx) \
(PAGE_OFFSET | SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR0_BASE)
(SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR0_BASE)
#define MK_PIU_IOR1(nid, idx) \
(PAGE_OFFSET | SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR1_BASE)
(SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR1_BASE)
static inline unsigned int
read_rc_conf(unsigned long node, unsigned long rc_index,
unsigned int conf_offset)
read_rc_conf(unsigned long node, unsigned long rc,
unsigned int offset)
{
unsigned long addr;
unsigned int value;
void __iomem *addr;
addr = MK_RC_CFG(node, rc_index) | conf_offset;
value = *(volatile unsigned int *)addr;
mb();
return value;
addr = __va(MK_RC_CFG(node, rc) | offset);
return readl(addr);
}
static inline void
write_rc_conf(unsigned long node, unsigned long rc_index,
unsigned int conf_offset, unsigned int data)
write_rc_conf(unsigned long node, unsigned long rc,
unsigned int offset, unsigned int data)
{
unsigned long addr;
void __iomem *addr;
addr = MK_RC_CFG(node, rc_index) | conf_offset;
*(unsigned int *)addr = data;
mb();
addr = __va(MK_RC_CFG(node, rc) | offset);
writel(data, addr);
}
static inline unsigned long
read_piu_ior0(unsigned long node, unsigned long rc_index,
read_piu_ior0(unsigned long node, unsigned long rc,
unsigned int reg)
{
unsigned long addr;
unsigned long value;
addr = MK_PIU_IOR0(node, rc_index) + reg;
value = *(volatile unsigned long __iomem *)addr;
mb();
void __iomem *addr;
return value;
addr = __va(MK_PIU_IOR0(node, rc) + reg);
return readq(addr);
}
static inline void
write_piu_ior0(unsigned long node, unsigned long rc_index,
write_piu_ior0(unsigned long node, unsigned long rc,
unsigned int reg, unsigned long data)
{
unsigned long addr;
void __iomem *addr;
addr = MK_PIU_IOR0(node, rc_index) + reg;
*(unsigned long __iomem *)addr = data;
mb();
addr = __va(MK_PIU_IOR0(node, rc) + reg);
writeq(data, addr);
}
static inline unsigned long
read_piu_ior1(unsigned long node, unsigned long rc_index,
read_piu_ior1(unsigned long node, unsigned long rc,
unsigned int reg)
{
unsigned long addr, value;
void __iomem *addr;
addr = MK_PIU_IOR1(node, rc_index) + reg;
value = *(volatile unsigned long __iomem *)addr;
mb();
return value;
addr = __va(MK_PIU_IOR1(node, rc) + reg);
return readq(addr);
}
static inline void
write_piu_ior1(unsigned long node, unsigned long rc_index,
write_piu_ior1(unsigned long node, unsigned long rc,
unsigned int reg, unsigned long data)
{
unsigned long addr;
void __iomem *addr;
addr = MK_PIU_IOR1(node, rc_index) + reg;
*(volatile unsigned long __iomem *)addr = data;
mb();
addr = __va(MK_PIU_IOR1(node, rc) + reg);
writeq(data, addr);
}
static inline unsigned long
sw64_io_read(unsigned long node, unsigned long reg)
{
unsigned long addr, value;
addr = PAGE_OFFSET | SW64_IO_BASE(node) | reg;
value = *(volatile unsigned long __iomem *)addr;
mb();
void __iomem *addr;
return value;
addr = __va(SW64_IO_BASE(node) | reg);
return readq(addr);
}
static inline void
sw64_io_write(unsigned long node, unsigned long reg, unsigned long data)
{
unsigned long addr;
void __iomem *addr;
addr = PAGE_OFFSET | SW64_IO_BASE(node) | reg;
*(volatile unsigned long __iomem *)addr = data;
mb();
addr = __va(SW64_IO_BASE(node) | reg);
writeq(data, addr);
}
#endif
......@@ -2,12 +2,41 @@
#ifndef _ASM_SW64_SWITCH_TO_H
#define _ASM_SW64_SWITCH_TO_H
struct task_struct;
extern struct task_struct *__switch_to(unsigned long, struct task_struct *);
#include<linux/sched.h>
extern void __fpstate_save(struct task_struct *save_to);
extern void __fpstate_restore(struct task_struct *restore_from);
extern struct task_struct *__switch_to(unsigned long pcb,
struct task_struct *prev, struct task_struct *next);
extern void restore_da_match_after_sched(void);
#define switch_to(P, N, L) \
static inline void fpstate_save(struct task_struct *task)
{
if (likely(!(task->flags & PF_KTHREAD)))
__fpstate_save(task);
}
static inline void fpstate_restore(struct task_struct *task)
{
if (likely(!(task->flags & PF_KTHREAD)))
__fpstate_restore(task);
}
static inline void __switch_to_aux(struct task_struct *prev,
struct task_struct *next)
{
fpstate_save(prev);
fpstate_restore(next);
}
#define switch_to(prev, next, last) \
do { \
(L) = __switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P));\
struct task_struct *__prev = (prev); \
struct task_struct *__next = (next); \
__u64 __nextpcb = virt_to_phys(&task_thread_info(__next)->pcb); \
__switch_to_aux(__prev, __next); \
(last) = __switch_to(__nextpcb, __prev, __next); \
check_mmu_context(); \
} while (0)
......
......@@ -9,6 +9,11 @@
#include <asm/types.h>
#include <asm/sysinfo.h>
typedef struct {
unsigned long seg;
} mm_segment_t;
struct pcb_struct {
unsigned long ksp;
unsigned long usp;
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_TRACE_CLOCK_H
#define _ASM_SW64_TRACE_CLOCK_H

#include <linux/compiler.h>
#include <linux/types.h>

/* No arch-specific trace clocks are registered (macro expands empty). */
#define ARCH_TRACE_CLOCKS

#endif /* _ASM_SW64_TRACE_CLOCK_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_TYPES_H
#define _ASM_SW64_TYPES_H

/* Kernel-side sw64 uses the ll64 model (long long for __u64/__s64). */
#include <asm-generic/int-ll64.h>

#endif /* _ASM_SW64_TYPES_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_UNALIGNED_H
#define _ASM_SW64_UNALIGNED_H

#include <linux/unaligned/le_struct.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>

/* sw64 is little-endian: default unaligned accessors are the LE ones. */
#define get_unaligned	__get_unaligned_le
#define put_unaligned	__put_unaligned_le

#endif /* _ASM_SW64_UNALIGNED_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Access to VGA videoram
 *
 * (c) 1998 Martin Mares <mj@ucw.cz>
 */
#ifndef _ASM_SW64_VGA_H
#define _ASM_SW64_VGA_H

#include <asm/io.h>

#define VT_BUF_HAVE_RW
#define VT_BUF_HAVE_MEMSETW
#define VT_BUF_HAVE_MEMCPYW

/*
 * VT buffer accessors: the screen buffer may live either in MMIO space
 * or in ordinary RAM, so each helper dispatches on __is_ioaddr().
 */
static inline void scr_writew(u16 val, volatile u16 *addr)
{
	if (__is_ioaddr(addr))
		__raw_writew(val, (volatile u16 __iomem *) addr);
	else
		*addr = val;
}

static inline u16 scr_readw(volatile const u16 *addr)
{
	if (__is_ioaddr(addr))
		return __raw_readw((volatile const u16 __iomem *) addr);
	else
		return *addr;
}

static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
{
	if (__is_ioaddr(s))
		memsetw_io((u16 __iomem *) s, c, count);
	else
		memsetw(s, c, count);
}

/* Do not trust that the usage will be correct; analyze the arguments. */
extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count);

/*
 * ??? These are currently only used for downloading character sets. As
 * such, they don't need memory barriers. Is this all they are intended
 * to be used for?
 */
#define vga_readb(a)	readb((u8 __iomem *)(a))
#define vga_writeb(v, a)	writeb(v, (u8 __iomem *)(a))

#ifdef CONFIG_VGA_HOSE
#include <linux/ioport.h>
#include <linux/pci.h>

extern struct pci_controller *pci_vga_hose;

/* Legacy VGA I/O port range, excluding the 8514 ports 0x3b3/0x3d3. */
#define __is_port_vga(a)	\
	(((a) >= 0x3b0) && ((a) < 0x3e0) && \
	 ((a) != 0x3b3) && ((a) != 0x3d3))

/* Legacy VGA memory aperture. */
#define __is_mem_vga(a)	\
	(((a) >= 0xa0000) && ((a) <= 0xc0000))

/* Rebase a legacy VGA address onto the owning PCI hose's windows. */
#define FIXUP_IOADDR_VGA(a) do { \
	if (pci_vga_hose && __is_port_vga(a)) \
		(a) += pci_vga_hose->io_space->start; \
} while (0)

#define FIXUP_MEMADDR_VGA(a) do { \
	if (pci_vga_hose && __is_mem_vga(a)) \
		(a) += pci_vga_hose->mem_space->start; \
} while (0)

#else /* CONFIG_VGA_HOSE */
/* No hose support: fixups compile away and predicates are always false. */
#define pci_vga_hose 0
#define __is_port_vga(a) 0
#define __is_mem_vga(a) 0
#define FIXUP_IOADDR_VGA(a)
#define FIXUP_MEMADDR_VGA(a)
#endif /* CONFIG_VGA_HOSE */

#define VGA_MAP_MEM(x, s)	((unsigned long)ioremap(x, s))

#endif
......@@ -33,10 +33,12 @@
#define PC0_RAW_BASE 0x0
#define PC1_RAW_BASE 0x100
#define PC0_MIN 0x0
#define PC0_MAX 0xF
#define PC1_MIN 0x0
#define PC1_MAX 0x37
#define PC1_MAX 0x3D
#define SW64_PERFCTRL_KM 2
#define SW64_PERFCTRL_UM 3
#define SW64_PERFCTRL_AM 4
/* pc0 events */
#define PC0_INSTRUCTIONS 0x0
......
# SPDX-License-Identifier: GPL-2.0
# UAPI Header export list
# kvm_para.h is the asm-generic wrapper; unistd_64.h is generated from
# the syscall table at build time.
generic-y += kvm_para.h
generated-y += unistd_64.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SW64_CONSOLE_H
#define _UAPI_ASM_SW64_CONSOLE_H

/*
 * Console callback routine numbers
 * (firmware console service selectors; values are part of the ABI)
 */
#define CCB_GETC		0x01
#define CCB_PUTS		0x02
#define CCB_RESET_TERM		0x03
#define CCB_SET_TERM_INT	0x04
#define CCB_SET_TERM_CTL	0x05
#define CCB_PROCESS_KEYCODE	0x06
#define CCB_OPEN_CONSOLE	0x07
#define CCB_CLOSE_CONSOLE	0x08

#define CCB_OPEN		0x10
#define CCB_CLOSE		0x11
#define CCB_IOCTL		0x12
#define CCB_READ		0x13
#define CCB_WRITE		0x14

#define CCB_SET_ENV		0x20
#define CCB_RESET_ENV		0x21
#define CCB_GET_ENV		0x22
#define CCB_SAVE_ENV		0x23

#define CCB_PSWITCH		0x30
#define CCB_BIOS_EMUL		0x32

/*
 * Environment variable numbers
 */
#define ENV_AUTO_ACTION		0x01
#define ENV_BOOT_DEV		0x02
#define ENV_BOOTDEF_DEV		0x03
#define ENV_BOOTED_DEV		0x04
#define ENV_BOOT_FILE		0x05
#define ENV_BOOTED_FILE		0x06
#define ENV_BOOT_OSFLAGS	0x07
#define ENV_BOOTED_OSFLAGS	0x08
#define ENV_BOOT_RESET		0x09
#define ENV_DUMP_DEV		0x0A
#define ENV_ENABLE_AUDIT	0x0B
#define ENV_LICENSE		0x0C
#define ENV_CHAR_SET		0x0D
#define ENV_LANGUAGE		0x0E
#define ENV_TTY_DEV		0x0F

#endif /* _UAPI_ASM_SW64_CONSOLE_H */
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SW64_IPCBUF_H
#define _UAPI_ASM_SW64_IPCBUF_H

/* sw64 uses the generic ipc64_perm layout. */
#include <asm-generic/ipcbuf.h>

#endif
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SW64_KVM_PARA_H
#define _UAPI_ASM_SW64_KVM_PARA_H

/* No sw64-specific paravirt features; generic stubs only. */
#include <asm-generic/kvm_para.h>

#endif
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SW64_MSGBUF_H
#define _UAPI_ASM_SW64_MSGBUF_H

/*
 * The msqid64_ds structure for sw64 architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space is left for:
 * - 2 miscellaneous 64-bit values
 */
struct msqid64_ds {
	struct ipc64_perm msg_perm;
	long msg_stime;			/* last msgsnd time */
	long msg_rtime;			/* last msgrcv time */
	long msg_ctime;			/* last change time */
	unsigned long msg_cbytes;	/* current number of bytes on queue */
	unsigned long msg_qnum;		/* number of messages in queue */
	unsigned long msg_qbytes;	/* max number of bytes on queue */
	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
	__kernel_pid_t msg_lrpid;	/* last receive pid */
	unsigned long __unused1;
	unsigned long __unused2;
};

#endif /* _UAPI_ASM_SW64_MSGBUF_H */
......@@ -2,15 +2,8 @@
#ifndef _UAPI_ASM_SW64_PARAM_H
#define _UAPI_ASM_SW64_PARAM_H
#define HZ 100
#define EXEC_PAGESIZE 8192
#ifndef NOGROUP
#define NOGROUP (-1)
#endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#include <asm-generic/param.h>
#endif /* _UAPI_ASM_SW64_PARAM_H */
......@@ -13,6 +13,13 @@ enum perf_event_sw64_regs {
PERF_REG_SW64_R6,
PERF_REG_SW64_R7,
PERF_REG_SW64_R8,
PERF_REG_SW64_R9,
PERF_REG_SW64_R10,
PERF_REG_SW64_R11,
PERF_REG_SW64_R12,
PERF_REG_SW64_R13,
PERF_REG_SW64_R14,
PERF_REG_SW64_R15,
PERF_REG_SW64_R19,
PERF_REG_SW64_R20,
PERF_REG_SW64_R21,
......
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SW64_POLL_H
#define _UAPI_ASM_SW64_POLL_H

/* Generic poll flag values apply on sw64. */
#include <asm-generic/poll.h>

#endif
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SW64_POSIX_TYPES_H
#define _UAPI_ASM_SW64_POSIX_TYPES_H

/*
 * This file is generally used by user-level software, so you need to
 * be a little careful about namespace pollution etc. Also, we cannot
 * assume GCC is being used.
 */

/* 64-bit inode numbers; overrides the asm-generic default. */
typedef unsigned long	__kernel_ino_t;
#define __kernel_ino_t __kernel_ino_t

typedef unsigned long	__kernel_sigset_t;	/* at least 32 bits */

#include <asm-generic/posix_types.h>

#endif /* _UAPI_ASM_SW64_POSIX_TYPES_H */
......@@ -2,74 +2,30 @@
#ifndef _UAPI_ASM_SW64_PTRACE_H
#define _UAPI_ASM_SW64_PTRACE_H
#include <linux/types.h>
#ifndef __ASSEMBLY__
/*
* This struct defines the way the registers are stored on the
* kernel stack during a system call or other kernel entry
*
* NOTE! I want to minimize the overhead of system calls, so this
* struct has as little information as possible. I does not have
*
* - floating point regs: the kernel doesn't change those
* - r9-15: saved by the C compiler
*
* This makes "fork()" and "exec()" a bit more complex, but should
* give us low system call latency.
* User structures for general purpose, floating point and debug registers.
*/
/* General-purpose register set exposed to ptrace/regset users. */
struct user_pt_regs {
	__u64 regs[31];		/* r0-r30 */
	__u64 pc;
	__u64 pstate;
};
struct pt_regs {
unsigned long r0;
unsigned long r1;
unsigned long r2;
unsigned long r3;
unsigned long r4;
unsigned long r5;
unsigned long r6;
unsigned long r7;
unsigned long r8;
unsigned long r19;
unsigned long r20;
unsigned long r21;
unsigned long r22;
unsigned long r23;
unsigned long r24;
unsigned long r25;
unsigned long r26;
unsigned long r27;
unsigned long r28;
unsigned long hae;
/* JRP - These are the values provided to a0-a2 by HMcode */
unsigned long trap_a0;
unsigned long trap_a1;
unsigned long trap_a2;
/* These are saved by HMcode: */
unsigned long ps;
unsigned long pc;
unsigned long gp;
unsigned long r16;
unsigned long r17;
unsigned long r18;
/* 256 bits aligned for simd */
struct fpreg {
	__u64 v[4] __attribute__((aligned(32)));
};
/*
* This is the extended stack used by signal handlers and the context
* switcher: it's pushed after the normal "struct pt_regs".
*/
struct switch_stack {
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r12;
unsigned long r13;
unsigned long r14;
unsigned long r15;
unsigned long r26;
/* Floating-point/SIMD register set exposed to ptrace/regset users. */
struct user_fpsimd_state {
	struct fpreg fp[31];	/* f0-f30, each a 256-bit SIMD register */
	__u64 fpcr;		/* floating-point control register */
	__u64 __reserved[3];
};
#endif
#define PTRACE_GETREGS 12 /* get general purpose registers */
#define PTRACE_SETREGS 13 /* set general purpose registers */
#define PTRACE_GETFPREGS 14 /* get floating-point registers */
#define PTRACE_SETFPREGS 15 /* set floating-point registers */
/* PTRACE_ATTACH is 16 */
/* PTRACE_DETACH is 17 */
......
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SW64_SEMBUF_H
#define _UAPI_ASM_SW64_SEMBUF_H

/*
 * The semid64_ds structure for sw64 architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space is left for:
 * - 2 miscellaneous 64-bit values
 */
struct semid64_ds {
	struct ipc64_perm sem_perm;	/* permissions .. see ipc.h */
	long sem_otime;			/* last semop time */
	long sem_ctime;			/* last change time */
	unsigned long sem_nsems;	/* no. of semaphores in array */
	unsigned long __unused1;
	unsigned long __unused2;
};

#endif /* _UAPI_ASM_SW64_SEMBUF_H */
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SW64_SHMBUF_H
#define _UAPI_ASM_SW64_SHMBUF_H

/*
 * The shmid64_ds structure for sw64 architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space is left for:
 * - 2 miscellaneous 64-bit values
 */
struct shmid64_ds {
	struct ipc64_perm shm_perm;	/* operation perms */
	size_t shm_segsz;		/* size of segment (bytes) */
	long shm_atime;			/* last attach time */
	long shm_dtime;			/* last detach time */
	long shm_ctime;			/* last change time */
	__kernel_pid_t shm_cpid;	/* pid of creator */
	__kernel_pid_t shm_lpid;	/* pid of last operator */
	unsigned long shm_nattch;	/* no. of current attaches */
	unsigned long __unused1;
	unsigned long __unused2;
};

/* System-wide shm limits as reported via shmctl(IPC_INFO). */
struct shminfo64 {
	unsigned long shmmax;
	unsigned long shmmin;
	unsigned long shmmni;
	unsigned long shmseg;
	unsigned long shmall;
	unsigned long __unused1;
	unsigned long __unused2;
	unsigned long __unused3;
	unsigned long __unused4;
};

#endif /* _UAPI_ASM_SW64_SHMBUF_H */
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SW64_STATFS_H
#define _UAPI_ASM_SW64_STATFS_H

#include <linux/types.h>

/* Generic statfs/statfs64 layouts apply on sw64. */
#include <asm-generic/statfs.h>

#endif
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SW64_TYPES_H
#define _UAPI_ASM_SW64_TYPES_H

/*
 * This file is never included by application software unless
 * explicitly requested (e.g., via linux/types.h) in which case the
 * application is Linux specific so (user-) name space pollution is
 * not a major issue. However, for interoperability, libraries still
 * need to be careful to avoid a name clashes.
 */

/*
 * This is here because we used to use l64 for sw64 and we don't want
 * to impact user mode with our change to ll64 in the kernel.
 *
 * However, some user programs are fine with this. They can
 * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
 */
#ifndef __KERNEL__
#ifndef __SANE_USERSPACE_TYPES__
#include <asm-generic/int-l64.h>	/* legacy: __u64 is unsigned long */
#else
#include <asm-generic/int-ll64.h>	/* modern: __u64 is unsigned long long */
#endif /* __SANE_USERSPACE_TYPES__ */
#endif /* __KERNEL__ */

#endif /* _UAPI_ASM_SW64_TYPES_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_UCONTEXT_H
#define _ASM_SW64_UCONTEXT_H
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_SW64_UCONTEXT_H
#define _UAPI_ASM_SW64_UCONTEXT_H
struct ucontext {
unsigned long uc_flags;
......@@ -11,4 +11,4 @@ struct ucontext {
sigset_t uc_sigmask; /* mask last for extensibility */
};
#endif /* _ASM_SW64_UCONTEXT_H */
#endif /* _UAPI_ASM_SW64_UCONTEXT_H */
......@@ -13,9 +13,9 @@ CFLAGS_REMOVE_insn.o = -pg
CFLAGS_REMOVE_printk.o = -pg
endif
obj-y := entry.o traps.o process.o sys_sw64.o irq.o \
obj-y := entry.o fpu.o traps.o process.o sys_sw64.o irq.o \
irq_sw64.o signal.o setup.o ptrace.o time.o \
systbls.o dup_print.o tc.o \
systbls.o dup_print.o tc.o timer.o \
insn.o early_init.o topology.o cacheinfo.o \
vdso.o vdso/
......@@ -31,7 +31,7 @@ obj-$(CONFIG_HIBERNATION) += hibernate_asm.o hibernate.o
obj-$(CONFIG_AUDIT) += audit.o
obj-$(CONFIG_PCI) += pci_common.o
obj-$(CONFIG_RELOCATABLE) += relocate.o
obj-$(CONFIG_DEBUG_FS) += unaligned.o segvdbg.o
obj-$(CONFIG_DEBUG_FS) += segvdbg.o bindvcpu.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
ifndef CONFIG_PCI
......@@ -43,7 +43,6 @@ obj-y += kvm_cma.o
endif
# Core logic support
obj-$(CONFIG_SW64) += core.o timer.o
obj-$(CONFIG_SW64_CPUFREQ) += platform.o clock.o
obj-$(CONFIG_SW64_CPUAUTOPLUG) += cpuautoplug.o
......
......@@ -72,6 +72,13 @@ void foo(void)
DEFINE(PT_REGS_R6, offsetof(struct pt_regs, r6));
DEFINE(PT_REGS_R7, offsetof(struct pt_regs, r7));
DEFINE(PT_REGS_R8, offsetof(struct pt_regs, r8));
DEFINE(PT_REGS_R9, offsetof(struct pt_regs, r9));
DEFINE(PT_REGS_R10, offsetof(struct pt_regs, r10));
DEFINE(PT_REGS_R11, offsetof(struct pt_regs, r11));
DEFINE(PT_REGS_R12, offsetof(struct pt_regs, r12));
DEFINE(PT_REGS_R13, offsetof(struct pt_regs, r13));
DEFINE(PT_REGS_R14, offsetof(struct pt_regs, r14));
DEFINE(PT_REGS_R15, offsetof(struct pt_regs, r15));
DEFINE(PT_REGS_R19, offsetof(struct pt_regs, r19));
DEFINE(PT_REGS_R20, offsetof(struct pt_regs, r20));
DEFINE(PT_REGS_R21, offsetof(struct pt_regs, r21));
......@@ -93,58 +100,6 @@ void foo(void)
DEFINE(PT_REGS_R18, offsetof(struct pt_regs, r18));
BLANK();
DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack));
DEFINE(SWITCH_STACK_R9, offsetof(struct switch_stack, r9));
DEFINE(SWITCH_STACK_R10, offsetof(struct switch_stack, r10));
DEFINE(SWITCH_STACK_R11, offsetof(struct switch_stack, r11));
DEFINE(SWITCH_STACK_R12, offsetof(struct switch_stack, r12));
DEFINE(SWITCH_STACK_R13, offsetof(struct switch_stack, r13));
DEFINE(SWITCH_STACK_R14, offsetof(struct switch_stack, r14));
DEFINE(SWITCH_STACK_R15, offsetof(struct switch_stack, r15));
DEFINE(SWITCH_STACK_RA, offsetof(struct switch_stack, r26));
BLANK();
DEFINE(ALLREGS_SIZE, sizeof(struct allregs));
DEFINE(ALLREGS_R0, offsetof(struct allregs, regs[0]));
DEFINE(ALLREGS_R1, offsetof(struct allregs, regs[1]));
DEFINE(ALLREGS_R2, offsetof(struct allregs, regs[2]));
DEFINE(ALLREGS_R3, offsetof(struct allregs, regs[3]));
DEFINE(ALLREGS_R4, offsetof(struct allregs, regs[4]));
DEFINE(ALLREGS_R5, offsetof(struct allregs, regs[5]));
DEFINE(ALLREGS_R6, offsetof(struct allregs, regs[6]));
DEFINE(ALLREGS_R7, offsetof(struct allregs, regs[7]));
DEFINE(ALLREGS_R8, offsetof(struct allregs, regs[8]));
DEFINE(ALLREGS_R9, offsetof(struct allregs, regs[9]));
DEFINE(ALLREGS_R10, offsetof(struct allregs, regs[10]));
DEFINE(ALLREGS_R11, offsetof(struct allregs, regs[11]));
DEFINE(ALLREGS_R12, offsetof(struct allregs, regs[12]));
DEFINE(ALLREGS_R13, offsetof(struct allregs, regs[13]));
DEFINE(ALLREGS_R14, offsetof(struct allregs, regs[14]));
DEFINE(ALLREGS_R15, offsetof(struct allregs, regs[15]));
DEFINE(ALLREGS_R16, offsetof(struct allregs, regs[16]));
DEFINE(ALLREGS_R17, offsetof(struct allregs, regs[17]));
DEFINE(ALLREGS_R18, offsetof(struct allregs, regs[18]));
DEFINE(ALLREGS_R19, offsetof(struct allregs, regs[19]));
DEFINE(ALLREGS_R20, offsetof(struct allregs, regs[20]));
DEFINE(ALLREGS_R21, offsetof(struct allregs, regs[21]));
DEFINE(ALLREGS_R22, offsetof(struct allregs, regs[22]));
DEFINE(ALLREGS_R23, offsetof(struct allregs, regs[23]));
DEFINE(ALLREGS_R24, offsetof(struct allregs, regs[24]));
DEFINE(ALLREGS_R25, offsetof(struct allregs, regs[25]));
DEFINE(ALLREGS_R26, offsetof(struct allregs, regs[26]));
DEFINE(ALLREGS_R27, offsetof(struct allregs, regs[27]));
DEFINE(ALLREGS_R28, offsetof(struct allregs, regs[28]));
DEFINE(ALLREGS_R29, offsetof(struct allregs, regs[29]));
DEFINE(ALLREGS_R30, offsetof(struct allregs, regs[30]));
DEFINE(ALLREGS_R31, offsetof(struct allregs, regs[31]));
DEFINE(ALLREGS_PS, offsetof(struct allregs, ps));
DEFINE(ALLREGS_PC, offsetof(struct allregs, pc));
DEFINE(ALLREGS_GP, offsetof(struct allregs, gp));
DEFINE(ALLREGS_A0, offsetof(struct allregs, a0));
DEFINE(ALLREGS_A1, offsetof(struct allregs, a1));
DEFINE(ALLREGS_A2, offsetof(struct allregs, a2));
BLANK();
DEFINE(KVM_REGS_SIZE, sizeof(struct kvm_regs));
DEFINE(KVM_REGS_R0, offsetof(struct kvm_regs, r0));
DEFINE(KVM_REGS_R1, offsetof(struct kvm_regs, r1));
......@@ -223,39 +178,48 @@ void foo(void)
DEFINE(HOST_INT_R16, offsetof(struct host_int_args, r16));
BLANK();
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(THREAD_CTX_FP, offsetof(struct thread_struct, ctx_fp));
DEFINE(THREAD_FPCR, offsetof(struct thread_struct, fpcr));
DEFINE(CTX_FP_F0, offsetof(struct context_fpregs, f0));
DEFINE(CTX_FP_F1, offsetof(struct context_fpregs, f1));
DEFINE(CTX_FP_F2, offsetof(struct context_fpregs, f2));
DEFINE(CTX_FP_F3, offsetof(struct context_fpregs, f3));
DEFINE(CTX_FP_F4, offsetof(struct context_fpregs, f4));
DEFINE(CTX_FP_F5, offsetof(struct context_fpregs, f5));
DEFINE(CTX_FP_F6, offsetof(struct context_fpregs, f6));
DEFINE(CTX_FP_F7, offsetof(struct context_fpregs, f7));
DEFINE(CTX_FP_F8, offsetof(struct context_fpregs, f8));
DEFINE(CTX_FP_F9, offsetof(struct context_fpregs, f9));
DEFINE(CTX_FP_F10, offsetof(struct context_fpregs, f10));
DEFINE(CTX_FP_F11, offsetof(struct context_fpregs, f11));
DEFINE(CTX_FP_F12, offsetof(struct context_fpregs, f12));
DEFINE(CTX_FP_F13, offsetof(struct context_fpregs, f13));
DEFINE(CTX_FP_F14, offsetof(struct context_fpregs, f14));
DEFINE(CTX_FP_F15, offsetof(struct context_fpregs, f15));
DEFINE(CTX_FP_F16, offsetof(struct context_fpregs, f16));
DEFINE(CTX_FP_F17, offsetof(struct context_fpregs, f17));
DEFINE(CTX_FP_F18, offsetof(struct context_fpregs, f18));
DEFINE(CTX_FP_F19, offsetof(struct context_fpregs, f19));
DEFINE(CTX_FP_F20, offsetof(struct context_fpregs, f20));
DEFINE(CTX_FP_F21, offsetof(struct context_fpregs, f21));
DEFINE(CTX_FP_F22, offsetof(struct context_fpregs, f22));
DEFINE(CTX_FP_F23, offsetof(struct context_fpregs, f23));
DEFINE(CTX_FP_F24, offsetof(struct context_fpregs, f24));
DEFINE(CTX_FP_F25, offsetof(struct context_fpregs, f25));
DEFINE(CTX_FP_F26, offsetof(struct context_fpregs, f26));
DEFINE(CTX_FP_F27, offsetof(struct context_fpregs, f27));
DEFINE(CTX_FP_F28, offsetof(struct context_fpregs, f28));
DEFINE(CTX_FP_F29, offsetof(struct context_fpregs, f29));
DEFINE(CTX_FP_F30, offsetof(struct context_fpregs, f30));
OFFSET(TASK_THREAD, task_struct, thread);
OFFSET(TASK_THREAD_F0, task_struct, thread.fpstate.fp[0]);
OFFSET(TASK_THREAD_F1, task_struct, thread.fpstate.fp[1]);
OFFSET(TASK_THREAD_F2, task_struct, thread.fpstate.fp[2]);
OFFSET(TASK_THREAD_F3, task_struct, thread.fpstate.fp[3]);
OFFSET(TASK_THREAD_F4, task_struct, thread.fpstate.fp[4]);
OFFSET(TASK_THREAD_F5, task_struct, thread.fpstate.fp[5]);
OFFSET(TASK_THREAD_F6, task_struct, thread.fpstate.fp[6]);
OFFSET(TASK_THREAD_F7, task_struct, thread.fpstate.fp[7]);
OFFSET(TASK_THREAD_F8, task_struct, thread.fpstate.fp[8]);
OFFSET(TASK_THREAD_F9, task_struct, thread.fpstate.fp[9]);
OFFSET(TASK_THREAD_F10, task_struct, thread.fpstate.fp[10]);
OFFSET(TASK_THREAD_F11, task_struct, thread.fpstate.fp[11]);
OFFSET(TASK_THREAD_F12, task_struct, thread.fpstate.fp[12]);
OFFSET(TASK_THREAD_F13, task_struct, thread.fpstate.fp[13]);
OFFSET(TASK_THREAD_F14, task_struct, thread.fpstate.fp[14]);
OFFSET(TASK_THREAD_F15, task_struct, thread.fpstate.fp[15]);
OFFSET(TASK_THREAD_F16, task_struct, thread.fpstate.fp[16]);
OFFSET(TASK_THREAD_F17, task_struct, thread.fpstate.fp[17]);
OFFSET(TASK_THREAD_F18, task_struct, thread.fpstate.fp[18]);
OFFSET(TASK_THREAD_F19, task_struct, thread.fpstate.fp[19]);
OFFSET(TASK_THREAD_F20, task_struct, thread.fpstate.fp[20]);
OFFSET(TASK_THREAD_F21, task_struct, thread.fpstate.fp[21]);
OFFSET(TASK_THREAD_F22, task_struct, thread.fpstate.fp[22]);
OFFSET(TASK_THREAD_F23, task_struct, thread.fpstate.fp[23]);
OFFSET(TASK_THREAD_F24, task_struct, thread.fpstate.fp[24]);
OFFSET(TASK_THREAD_F25, task_struct, thread.fpstate.fp[25]);
OFFSET(TASK_THREAD_F26, task_struct, thread.fpstate.fp[26]);
OFFSET(TASK_THREAD_F27, task_struct, thread.fpstate.fp[27]);
OFFSET(TASK_THREAD_F28, task_struct, thread.fpstate.fp[28]);
OFFSET(TASK_THREAD_F29, task_struct, thread.fpstate.fp[29]);
OFFSET(TASK_THREAD_F30, task_struct, thread.fpstate.fp[30]);
OFFSET(TASK_THREAD_FPCR, task_struct, thread.fpstate.fpcr);
BLANK();
OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
BLANK();
}
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Wang Yuanheng
* Author: Wang Yuanheng
*
*/
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <asm/debug.h>
extern bool bind_vcpu_enabled;
/*
 * Expose the bind_vcpu_enabled knob as <debugfs>/sw_64/bind_vcpu.
 *
 * Returns 0 on success, -ENODEV when the arch debugfs root was never
 * created.
 */
static int __init bind_vcpu_init(void)
{
	if (!sw64_debugfs_dir)
		return -ENODEV;

	/*
	 * Per the debugfs API documentation the return value of
	 * debugfs_create_bool() should be ignored: on failure debugfs
	 * returns an ERR_PTR (not NULL), so the previous
	 * "if (!dentry) return -ENOMEM" check could never fire on a real
	 * error and only obscured the code.
	 */
	debugfs_create_bool("bind_vcpu", 0644, sw64_debugfs_dir,
			    &bind_vcpu_enabled);

	return 0;
}
late_initcall(bind_vcpu_init);
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/memblock.h>
#ifdef CONFIG_DISCONTIGMEM
#ifdef CONFIG_NUMA
int pa_to_nid(unsigned long pa)
{
int i = 0;
phys_addr_t pfn_base, pfn_size, pfn;
pfn = pa >> PAGE_SHIFT;
for (i = 0; i < MAX_NUMNODES; i++) {
if (!NODE_DATA(i))
continue;
pfn_base = NODE_DATA(i)->node_start_pfn;
pfn_size = NODE_DATA(i)->node_spanned_pages;
if (pfn >= pfn_base && pfn < pfn_base + pfn_size)
return i;
}
pr_err("%s: pa %#lx does not belong to any node, return node 0\n", __func__, pa);
return 0;
}
EXPORT_SYMBOL(pa_to_nid);
#else /* !CONFIG_NUMA */
/* Without NUMA there is a single node: every physical address is on node 0. */
int pa_to_nid(unsigned long pa)
{
return 0;
}
EXPORT_SYMBOL(pa_to_nid);
#endif /* CONFIG_NUMA */
#endif /* CONFIG_DISCONTIGMEM */
......@@ -4,6 +4,7 @@
#include <linux/spinlock.h>
#include <asm/chip3_io.h>
#include <asm/io.h>
#ifdef CONFIG_SW64_RRK
......@@ -18,7 +19,7 @@ unsigned long sw64_printk_offset;
* For output the kernel message on the console
* with full-system emulator.
*/
#define QEMU_PRINTF_BUFF_BASE (IO_BASE | MCU_BASE | 0x40000UL | PAGE_OFFSET)
#define QEMU_PRINTF_BUFF_BASE (IO_BASE | MCU_BASE | 0x40000UL)
int sw64_printk(const char *fmt, va_list args)
{
......@@ -38,9 +39,10 @@ int sw64_printk(const char *fmt, va_list args)
} else {
printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args);
if (is_in_emul()) {
unsigned long write_addr = QEMU_PRINTF_BUFF_BASE;
*(unsigned long *)write_addr = (unsigned long)((((unsigned long)sw64_printk_buf) & 0xffffffffUL)
| ((unsigned long)printed_len << 32));
void __iomem *addr = __va(QEMU_PRINTF_BUFF_BASE);
u64 data = ((u64)sw64_printk_buf & 0xffffffffUL)
| ((u64)printed_len << 32);
*(u64 *)addr = data;
}
}
sw64_printk_offset += printed_len;
......
......@@ -21,52 +21,84 @@
* the hmcode-provided values are available to the signal handler.
*/
#define SAVE_ALL \
ldi $sp, -PT_REGS_PS($sp); \
stl $0, PT_REGS_R0($sp); \
stl $1, PT_REGS_R1($sp); \
stl $2, PT_REGS_R2($sp); \
stl $3, PT_REGS_R3($sp); \
stl $4, PT_REGS_R4($sp); \
stl $28, PT_REGS_R28($sp); \
stl $5, PT_REGS_R5($sp); \
stl $6, PT_REGS_R6($sp); \
stl $7, PT_REGS_R7($sp); \
stl $8, PT_REGS_R8($sp); \
stl $19, PT_REGS_R19($sp); \
stl $20, PT_REGS_R20($sp); \
stl $21, PT_REGS_R21($sp); \
stl $22, PT_REGS_R22($sp); \
stl $23, PT_REGS_R23($sp); \
stl $24, PT_REGS_R24($sp); \
stl $25, PT_REGS_R25($sp); \
stl $26, PT_REGS_R26($sp); \
stl $27, PT_REGS_R27($sp); \
stl $16, PT_REGS_TRAP_A0($sp); \
stl $17, PT_REGS_TRAP_A1($sp); \
/*
 * Push a pt_regs frame on the kernel stack and save the non-callee-saved
 * integer registers into it.  r16-r18 (a0-a2) are stored into the
 * TRAP_A0..TRAP_A2 slots so the hmcode-provided trap arguments are kept
 * (see the comment block above: they must stay available to handlers).
 * Callee-saved r9-r15 are handled separately by SAVE_CALLEE_REGS.
 */
.macro SAVE_COMMON_REGS
ldi $sp, -PT_REGS_PS($sp)	/* reserve the pt_regs frame */
stl $0, PT_REGS_R0($sp)
stl $1, PT_REGS_R1($sp)
stl $2, PT_REGS_R2($sp)
stl $3, PT_REGS_R3($sp)
stl $4, PT_REGS_R4($sp)
stl $28, PT_REGS_R28($sp)
stl $5, PT_REGS_R5($sp)
stl $6, PT_REGS_R6($sp)
stl $7, PT_REGS_R7($sp)
stl $8, PT_REGS_R8($sp)
stl $19, PT_REGS_R19($sp)
stl $20, PT_REGS_R20($sp)
stl $21, PT_REGS_R21($sp)
stl $22, PT_REGS_R22($sp)
stl $23, PT_REGS_R23($sp)
stl $24, PT_REGS_R24($sp)
stl $25, PT_REGS_R25($sp)
stl $26, PT_REGS_R26($sp)
stl $27, PT_REGS_R27($sp)
stl $16, PT_REGS_TRAP_A0($sp)	/* hmcode trap args a0-a2 */
stl $17, PT_REGS_TRAP_A1($sp)
stl $18, PT_REGS_TRAP_A2($sp)
.endm
#define RESTORE_ALL \
ldl $0, PT_REGS_R0($sp); \
ldl $1, PT_REGS_R1($sp); \
ldl $2, PT_REGS_R2($sp); \
ldl $3, PT_REGS_R3($sp); \
ldl $4, PT_REGS_R4($sp); \
ldl $5, PT_REGS_R5($sp); \
ldl $6, PT_REGS_R6($sp); \
ldl $7, PT_REGS_R7($sp); \
ldl $8, PT_REGS_R8($sp); \
ldl $19, PT_REGS_R19($sp); \
ldl $20, PT_REGS_R20($sp); \
ldl $21, PT_REGS_R21($sp); \
ldl $22, PT_REGS_R22($sp); \
ldl $23, PT_REGS_R23($sp); \
ldl $24, PT_REGS_R24($sp); \
ldl $25, PT_REGS_R25($sp); \
ldl $26, PT_REGS_R26($sp); \
ldl $27, PT_REGS_R27($sp); \
ldl $28, PT_REGS_R28($sp); \
/*
 * Reload the non-callee-saved integer registers from the pt_regs frame
 * and pop it.  Inverse of SAVE_COMMON_REGS (the TRAP_A0..A2 slots are
 * restored by hmcode, not here).
 */
.macro RESTORE_COMMON_REGS
ldl $0, PT_REGS_R0($sp)
ldl $1, PT_REGS_R1($sp)
ldl $2, PT_REGS_R2($sp)
ldl $3, PT_REGS_R3($sp)
ldl $4, PT_REGS_R4($sp)
ldl $5, PT_REGS_R5($sp)
ldl $6, PT_REGS_R6($sp)
ldl $7, PT_REGS_R7($sp)
ldl $8, PT_REGS_R8($sp)
ldl $19, PT_REGS_R19($sp)
ldl $20, PT_REGS_R20($sp)
ldl $21, PT_REGS_R21($sp)
ldl $22, PT_REGS_R22($sp)
ldl $23, PT_REGS_R23($sp)
ldl $24, PT_REGS_R24($sp)
ldl $25, PT_REGS_R25($sp)
ldl $26, PT_REGS_R26($sp)
ldl $27, PT_REGS_R27($sp)
ldl $28, PT_REGS_R28($sp)
ldi $sp, PT_REGS_PS($sp)	/* pop the pt_regs frame */
.endm
/* Save callee-saved r9-r15 into an already-pushed pt_regs frame. */
.macro SAVE_CALLEE_REGS
stl $9, PT_REGS_R9($sp)
stl $10, PT_REGS_R10($sp)
stl $11, PT_REGS_R11($sp)
stl $12, PT_REGS_R12($sp)
stl $13, PT_REGS_R13($sp)
stl $14, PT_REGS_R14($sp)
stl $15, PT_REGS_R15($sp)
.endm
/* Reload callee-saved r9-r15; frame is popped later by RESTORE_COMMON_REGS. */
.macro RESTORE_CALLEE_REGS
ldl $9, PT_REGS_R9($sp)
ldl $10, PT_REGS_R10($sp)
ldl $11, PT_REGS_R11($sp)
ldl $12, PT_REGS_R12($sp)
ldl $13, PT_REGS_R13($sp)
ldl $14, PT_REGS_R14($sp)
ldl $15, PT_REGS_R15($sp)
.endm
/* Full register save: common regs push the frame, then callee regs fill it. */
.macro SAVE_ALL
SAVE_COMMON_REGS
SAVE_CALLEE_REGS
.endm
/* Full restore, in reverse order; RESTORE_COMMON_REGS pops the frame last. */
.macro RESTORE_ALL
RESTORE_CALLEE_REGS
RESTORE_COMMON_REGS
.endm
/*
* Non-syscall kernel entry points.
......@@ -101,31 +133,11 @@ entArith:
.ent entMM
entMM:
SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them. */
subl $sp, SWITCH_STACK_RA, $sp
stl $9, SWITCH_STACK_R9($sp)
stl $10, SWITCH_STACK_R10($sp)
stl $11, SWITCH_STACK_R11($sp)
stl $12, SWITCH_STACK_R12($sp)
stl $13, SWITCH_STACK_R13($sp)
stl $14, SWITCH_STACK_R14($sp)
stl $15, SWITCH_STACK_R15($sp)
addl $sp, SWITCH_STACK_RA, $19
/* handle the fault */
ldi $8, 0x3fff
ldi $26, ret_from_sys_call
bic $sp, $8, $8
call $26, do_page_fault
/* reload the registers after the exception code played. */
ldl $9, SWITCH_STACK_R9($sp)
ldl $10, SWITCH_STACK_R10($sp)
ldl $11, SWITCH_STACK_R11($sp)
ldl $12, SWITCH_STACK_R12($sp)
ldl $13, SWITCH_STACK_R13($sp)
ldl $14, SWITCH_STACK_R14($sp)
ldl $15, SWITCH_STACK_R15($sp)
addl $sp, SWITCH_STACK_RA, $sp
/* finish up the syscall as normal. */
br ret_from_sys_call
mov $sp, $19
call $31, do_page_fault
.end entMM
.align 4
......@@ -140,109 +152,32 @@ entIF:
call $31, do_entIF
.end entIF
/*
* Handle unalignment exception.
* We don't handle the "gp" register correctly, but if we fault on a
* gp-register unaligned load/store, something is _very_ wrong in the
* kernel anyway.
*/
.align 4
.globl entUna
.ent entUna
entUna:
ldi $sp, -ALLREGS_PS($sp)
stl $0, ALLREGS_R0($sp)
ldl $0, ALLREGS_PS($sp) /* get PS */
stl $1, ALLREGS_R1($sp)
stl $2, ALLREGS_R2($sp)
stl $3, ALLREGS_R3($sp)
and $0, 8, $0 /* user mode? */
stl $4, ALLREGS_R4($sp)
bne $0, entUnaUser /* yup -> do user-level unaligned fault */
stl $5, ALLREGS_R5($sp)
stl $6, ALLREGS_R6($sp)
stl $7, ALLREGS_R7($sp)
stl $8, ALLREGS_R8($sp)
stl $9, ALLREGS_R9($sp)
stl $10, ALLREGS_R10($sp)
stl $11, ALLREGS_R11($sp)
stl $12, ALLREGS_R12($sp)
stl $13, ALLREGS_R13($sp)
stl $14, ALLREGS_R14($sp)
stl $15, ALLREGS_R15($sp)
/* 16-18 HMCODE-saved */
stl $19, ALLREGS_R19($sp)
stl $20, ALLREGS_R20($sp)
stl $21, ALLREGS_R21($sp)
stl $22, ALLREGS_R22($sp)
stl $23, ALLREGS_R23($sp)
stl $24, ALLREGS_R24($sp)
stl $25, ALLREGS_R25($sp)
stl $26, ALLREGS_R26($sp)
stl $27, ALLREGS_R27($sp)
stl $28, ALLREGS_R28($sp)
mov $sp, $19
stl $gp, ALLREGS_R29($sp)
SAVE_ALL
ldi $8, 0x3fff
stl $31, ALLREGS_R31($sp)
bic $sp, $8, $8
mov $sp, $19
ldl $0, PT_REGS_PS($sp)
and $0, 8, $0 /* user mode ? */
beq $0, 1f
ldi $26, ret_from_sys_call
call $31, do_entUnaUser /* return to ret_from_syscall */
1: ldl $9, PT_REGS_GP($sp)
call $26, do_entUna
ldl $0, ALLREGS_R0($sp)
ldl $1, ALLREGS_R1($sp)
ldl $2, ALLREGS_R2($sp)
ldl $3, ALLREGS_R3($sp)
ldl $4, ALLREGS_R4($sp)
ldl $5, ALLREGS_R5($sp)
ldl $6, ALLREGS_R6($sp)
ldl $7, ALLREGS_R7($sp)
ldl $8, ALLREGS_R8($sp)
ldl $9, ALLREGS_R9($sp)
ldl $10, ALLREGS_R10($sp)
ldl $11, ALLREGS_R11($sp)
ldl $12, ALLREGS_R12($sp)
ldl $13, ALLREGS_R13($sp)
ldl $14, ALLREGS_R14($sp)
ldl $15, ALLREGS_R15($sp)
/* 16-18 HMCODE-saved */
ldl $19, ALLREGS_R19($sp)
ldl $20, ALLREGS_R20($sp)
ldl $21, ALLREGS_R21($sp)
ldl $22, ALLREGS_R22($sp)
ldl $23, ALLREGS_R23($sp)
ldl $24, ALLREGS_R24($sp)
ldl $25, ALLREGS_R25($sp)
ldl $26, ALLREGS_R26($sp)
ldl $27, ALLREGS_R27($sp)
ldl $28, ALLREGS_R28($sp)
ldl $gp, ALLREGS_R29($sp)
ldi $sp, ALLREGS_PS($sp)
stl $9, PT_REGS_GP($sp)
RESTORE_ALL
sys_call HMC_rti
.end entUna
.align 4
.ent entUnaUser
entUnaUser:
ldl $0, ALLREGS_R0($sp) /* restore original $0 */
ldi $sp, ALLREGS_PS($sp) /* pop entUna's stack frame */
SAVE_ALL /* setup normal kernel stack */
ldi $sp, -SWITCH_STACK_RA($sp)
stl $9, SWITCH_STACK_R9($sp)
stl $10, SWITCH_STACK_R10($sp)
stl $11, SWITCH_STACK_R11($sp)
stl $12, SWITCH_STACK_R12($sp)
stl $13, SWITCH_STACK_R13($sp)
stl $14, SWITCH_STACK_R14($sp)
stl $15, SWITCH_STACK_R15($sp)
ldi $8, 0x3fff
addl $sp, SWITCH_STACK_RA, $19
bic $sp, $8, $8
call $26, do_entUnaUser
ldl $9, SWITCH_STACK_R9($sp)
ldl $10, SWITCH_STACK_R10($sp)
ldl $11, SWITCH_STACK_R11($sp)
ldl $12, SWITCH_STACK_R12($sp)
ldl $13, SWITCH_STACK_R13($sp)
ldl $14, SWITCH_STACK_R14($sp)
ldl $15, SWITCH_STACK_R15($sp)
ldi $sp, SWITCH_STACK_RA($sp)
br ret_from_sys_call
.end entUnaUser
/*
* The system call entry point is special. Most importantly, it looks
* like a function call to userspace as far as clobbered registers. We
......@@ -368,9 +303,7 @@ $work_resched:
$work_notifysig:
mov $sp, $16
bsr $1, do_switch_stack
call $26, do_work_pending
bsr $1, undo_switch_stack
br restore_all
.end work_pending
......@@ -384,14 +317,9 @@ $work_notifysig:
.ent strace
strace:
/* set up signal stack, call syscall_trace */
bsr $1, do_switch_stack
mov $0, $9
mov $19, $10
call $26, syscall_trace_enter
mov $9, $18
mov $10, $19
bsr $1, undo_switch_stack
blt $0, $syscall_trace_failed
/* get the system call number and the arguments back.. */
......@@ -420,10 +348,7 @@ ret_from_straced:
stl $31, PT_REGS_R19($sp) /* a3=0 => no error */
$strace_success:
stl $0, PT_REGS_R0($sp) /* save return value */
bsr $1, do_switch_stack
call $26, syscall_trace_leave
bsr $1, undo_switch_stack
br $31, ret_from_sys_call
.align 3
......@@ -438,172 +363,66 @@ $strace_error:
stl $0, PT_REGS_R0($sp)
stl $1, PT_REGS_R19($sp) /* a3 for return */
bsr $1, do_switch_stack
mov $18, $9 /* save old syscall number */
mov $19, $10 /* save old a3 */
call $26, syscall_trace_leave
mov $9, $18
mov $10, $19
bsr $1, undo_switch_stack
mov $31, $26 /* tell "ret_from_sys_call" we can restart */
br ret_from_sys_call
$syscall_trace_failed:
bsr $1, do_switch_stack
mov $18, $9
mov $19, $10
call $26, syscall_trace_leave
mov $9, $18
mov $10, $19
bsr $1, undo_switch_stack
mov $31, $26 /* tell "ret_from_sys_call" we can restart */
br ret_from_sys_call
.end strace
.align 4
.ent do_switch_stack
do_switch_stack:
ldi $sp, -SWITCH_STACK_SIZE($sp)
flds $f31, 0($sp) /* fillde hint */
stl $9, SWITCH_STACK_R9($sp)
stl $10, SWITCH_STACK_R10($sp)
stl $11, SWITCH_STACK_R11($sp)
stl $12, SWITCH_STACK_R12($sp)
stl $13, SWITCH_STACK_R13($sp)
stl $14, SWITCH_STACK_R14($sp)
stl $15, SWITCH_STACK_R15($sp)
stl $26, SWITCH_STACK_RA($sp)
// SIMD-FP
ldl $9, TI_TASK($8)
ldi $9, TASK_THREAD($9)
ldi $10, THREAD_CTX_FP($9)
vstd $f0, CTX_FP_F0($10)
vstd $f1, CTX_FP_F1($10)
vstd $f2, CTX_FP_F2($10)
vstd $f3, CTX_FP_F3($10)
vstd $f4, CTX_FP_F4($10)
vstd $f5, CTX_FP_F5($10)
vstd $f6, CTX_FP_F6($10)
vstd $f7, CTX_FP_F7($10)
vstd $f8, CTX_FP_F8($10)
vstd $f9, CTX_FP_F9($10)
vstd $f10, CTX_FP_F10($10)
vstd $f11, CTX_FP_F11($10)
vstd $f12, CTX_FP_F12($10)
vstd $f13, CTX_FP_F13($10)
vstd $f14, CTX_FP_F14($10)
vstd $f15, CTX_FP_F15($10)
vstd $f16, CTX_FP_F16($10)
vstd $f17, CTX_FP_F17($10)
vstd $f18, CTX_FP_F18($10)
vstd $f19, CTX_FP_F19($10)
vstd $f20, CTX_FP_F20($10)
vstd $f21, CTX_FP_F21($10)
vstd $f22, CTX_FP_F22($10)
vstd $f23, CTX_FP_F23($10)
vstd $f24, CTX_FP_F24($10)
vstd $f25, CTX_FP_F25($10)
vstd $f26, CTX_FP_F26($10)
vstd $f27, CTX_FP_F27($10)
rfpcr $f0
vstd $f28, CTX_FP_F28($10)
vstd $f29, CTX_FP_F29($10)
vstd $f30, CTX_FP_F30($10)
fstd $f0, THREAD_FPCR($9)
vldd $f0, CTX_FP_F0($10)
ldl $9, SWITCH_STACK_R9($sp)
ldl $10, SWITCH_STACK_R10($sp)
ret $31, ($1), 1
.end do_switch_stack
.align 4
.ent undo_switch_stack
undo_switch_stack:
#ifdef CONFIG_SUBARCH_C3B
fillcs 0($sp) /* prefetch */
#endif
ldl $11, SWITCH_STACK_R11($sp)
ldl $12, SWITCH_STACK_R12($sp)
ldl $13, SWITCH_STACK_R13($sp)
ldl $14, SWITCH_STACK_R14($sp)
ldl $15, SWITCH_STACK_R15($sp)
ldl $26, SWITCH_STACK_RA($sp)
// SIMD-FP
ldl $9, TI_TASK($8)
ldi $9, TASK_THREAD($9)
fldd $f0, THREAD_FPCR($9)
wfpcr $f0
fimovd $f0, $10
and $10, 0x3, $10
beq $10, $setfpec_0
subl $10, 0x1, $10
beq $10, $setfpec_1
subl $10, 0x1, $10
beq $10, $setfpec_2
setfpec3
br $setfpec_over
$setfpec_0:
setfpec0
br $setfpec_over
$setfpec_1:
setfpec1
br $setfpec_over
$setfpec_2:
setfpec2
$setfpec_over:
ldi $10, THREAD_CTX_FP($9)
vldd $f0, CTX_FP_F0($10)
vldd $f1, CTX_FP_F1($10)
vldd $f2, CTX_FP_F2($10)
vldd $f3, CTX_FP_F3($10)
vldd $f4, CTX_FP_F4($10)
vldd $f5, CTX_FP_F5($10)
vldd $f6, CTX_FP_F6($10)
vldd $f7, CTX_FP_F7($10)
vldd $f8, CTX_FP_F8($10)
vldd $f9, CTX_FP_F9($10)
vldd $f10, CTX_FP_F10($10)
vldd $f11, CTX_FP_F11($10)
vldd $f12, CTX_FP_F12($10)
vldd $f13, CTX_FP_F13($10)
vldd $f14, CTX_FP_F14($10)
vldd $f15, CTX_FP_F15($10)
vldd $f16, CTX_FP_F16($10)
vldd $f17, CTX_FP_F17($10)
vldd $f18, CTX_FP_F18($10)
vldd $f19, CTX_FP_F19($10)
vldd $f20, CTX_FP_F20($10)
vldd $f21, CTX_FP_F21($10)
vldd $f22, CTX_FP_F22($10)
vldd $f23, CTX_FP_F23($10)
vldd $f24, CTX_FP_F24($10)
vldd $f25, CTX_FP_F25($10)
vldd $f26, CTX_FP_F26($10)
vldd $f27, CTX_FP_F27($10)
vldd $f28, CTX_FP_F28($10)
vldd $f29, CTX_FP_F29($10)
vldd $f30, CTX_FP_F30($10)
ldl $9, SWITCH_STACK_R9($sp)
ldl $10, SWITCH_STACK_R10($sp)
ldi $sp, SWITCH_STACK_SIZE($sp)
ret $31, ($1), 1
.end undo_switch_stack
/*
* The meat of the context switch code.
* Integer register context switch
* The callee-saved registers must be saved and restored.
*
* a0: physical address of next task's pcb, used by hmcode
* a1: previous task_struct (must be preserved across the switch)
* a2: next task_struct
*
* The value of a1 must be preserved by this function, as that's how
* arguments are passed to schedule_tail.
*/
.align 4
.globl __switch_to
.ent __switch_to
__switch_to:
.prologue 0
bsr $1, do_switch_stack
/* Save context into prev->thread */
stl $26, TASK_THREAD_RA($17)
stl $30, TASK_THREAD_SP($17)
stl $9, TASK_THREAD_S0($17)
stl $10, TASK_THREAD_S1($17)
stl $11, TASK_THREAD_S2($17)
stl $12, TASK_THREAD_S3($17)
stl $13, TASK_THREAD_S4($17)
stl $14, TASK_THREAD_S5($17)
stl $15, TASK_THREAD_S6($17)
/* Restore context from next->thread */
ldl $26, TASK_THREAD_RA($18)
ldl $9, TASK_THREAD_S0($18)
ldl $10, TASK_THREAD_S1($18)
ldl $11, TASK_THREAD_S2($18)
ldl $12, TASK_THREAD_S3($18)
ldl $13, TASK_THREAD_S4($18)
ldl $14, TASK_THREAD_S5($18)
ldl $15, TASK_THREAD_S6($18)
sys_call HMC_swpctx
/*
* SP has been saved and restored by HMC_swpctx,
* and restore it again here for future expansion.
*/
ldl $30, TASK_THREAD_SP($18)
ldi $8, 0x3fff
bic $sp, $8, $8
bsr $1, undo_switch_stack
mov $17, $0
ret
.end __switch_to
......@@ -637,30 +456,6 @@ ret_from_kernel_thread:
br $31, ret_to_user
.end ret_from_kernel_thread
/*
* Special system calls. Most of these are special in that they either
* have to play switch_stack games or in some way use the pt_regs struct.
*/
.macro fork_like name
.align 4
.globl sw64_\name
.ent sw64_\name
sw64_\name:
.prologue 0
bsr $1, do_switch_stack
call $26, sys_\name
ldl $26, SWITCH_STACK_RA($sp)
ldi $sp, SWITCH_STACK_SIZE($sp)
ret
.end sw64_\name
.endm
fork_like fork
fork_like vfork
fork_like clone
fork_like clone3
.align 4
.globl sys_sigreturn
.ent sys_sigreturn
......@@ -668,12 +463,10 @@ sys_sigreturn:
.prologue 0
ldi $9, ret_from_straced
cmpult $26, $9, $9
ldi $sp, -SWITCH_STACK_SIZE($sp)
call $26, do_sigreturn
bne $9, 1f
call $26, syscall_trace_leave
1: br $1, undo_switch_stack
br ret_from_sys_call
1: br ret_from_sys_call
.end sys_sigreturn
.align 4
......@@ -683,12 +476,10 @@ sys_rt_sigreturn:
.prologue 0
ldi $9, ret_from_straced
cmpult $26, $9, $9
ldi $sp, -SWITCH_STACK_SIZE($sp)
call $26, do_rt_sigreturn
bne $9, 1f
call $26, syscall_trace_leave
1: br $1, undo_switch_stack
br ret_from_sys_call
1: br ret_from_sys_call
.end sys_rt_sigreturn
.align 4
......
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/regdef.h>
.text
.set noat
/*
 * Save the full FP/SIMD register state of the outgoing task into
 * a0->thread.fpstate (offsets generated in asm-offsets.c).
 * f0-f27 are stored first; rfpcr then clobbers $f0 with the FPCR value,
 * which is why $f0 is written to the FPCR slot afterwards and finally
 * reloaded from the already-saved F0 slot before returning.
 */
ENTRY(__fpstate_save)
/* a0: prev task */
vstd $f0, TASK_THREAD_F0(a0)
vstd $f1, TASK_THREAD_F1(a0)
vstd $f2, TASK_THREAD_F2(a0)
vstd $f3, TASK_THREAD_F3(a0)
vstd $f4, TASK_THREAD_F4(a0)
vstd $f5, TASK_THREAD_F5(a0)
vstd $f6, TASK_THREAD_F6(a0)
vstd $f7, TASK_THREAD_F7(a0)
vstd $f8, TASK_THREAD_F8(a0)
vstd $f9, TASK_THREAD_F9(a0)
vstd $f10, TASK_THREAD_F10(a0)
vstd $f11, TASK_THREAD_F11(a0)
vstd $f12, TASK_THREAD_F12(a0)
vstd $f13, TASK_THREAD_F13(a0)
vstd $f14, TASK_THREAD_F14(a0)
vstd $f15, TASK_THREAD_F15(a0)
vstd $f16, TASK_THREAD_F16(a0)
vstd $f17, TASK_THREAD_F17(a0)
vstd $f18, TASK_THREAD_F18(a0)
vstd $f19, TASK_THREAD_F19(a0)
vstd $f20, TASK_THREAD_F20(a0)
vstd $f21, TASK_THREAD_F21(a0)
vstd $f22, TASK_THREAD_F22(a0)
vstd $f23, TASK_THREAD_F23(a0)
vstd $f24, TASK_THREAD_F24(a0)
vstd $f25, TASK_THREAD_F25(a0)
vstd $f26, TASK_THREAD_F26(a0)
vstd $f27, TASK_THREAD_F27(a0)
rfpcr $f0	/* read FPCR into $f0 (clobbers the live $f0, saved above) */
vstd $f28, TASK_THREAD_F28(a0)
vstd $f29, TASK_THREAD_F29(a0)
vstd $f30, TASK_THREAD_F30(a0)
fstd $f0, TASK_THREAD_FPCR(a0)	/* store FPCR into thread.fpstate.fpcr */
vldd $f0, TASK_THREAD_F0(a0)	/* restore original $f0 */
ret
END(__fpstate_save)
/*
 * Restore the full FP/SIMD register state of the incoming task from
 * a0->thread.fpstate.  The FPCR is written first (via $f0, which is
 * reloaded from the saved F0 slot at the end); the low two bits of the
 * FPCR value then select which setfpecN instruction is issued —
 * presumably the FP exception-control mode, TODO confirm against the
 * sw64 ISA manual.
 */
ENTRY(__fpstate_restore)
/* a0: next task */
fldd $f0, TASK_THREAD_FPCR(a0)
wfpcr $f0	/* write saved FPCR back to hardware */
fimovd $f0, t1	/* move FPCR bits to an integer reg for dispatch */
and t1, 0x3, t1	/* low two bits pick setfpec0..setfpec3 */
beq t1, $setfpec_0
subl t1, 0x1, t1
beq t1, $setfpec_1
subl t1, 0x1, t1
beq t1, $setfpec_2
setfpec3
br $setfpec_over
$setfpec_0:
setfpec0
br $setfpec_over
$setfpec_1:
setfpec1
br $setfpec_over
$setfpec_2:
setfpec2
$setfpec_over:
vldd $f0, TASK_THREAD_F0(a0)	/* reload $f0 last: it carried the FPCR above */
vldd $f1, TASK_THREAD_F1(a0)
vldd $f2, TASK_THREAD_F2(a0)
vldd $f3, TASK_THREAD_F3(a0)
vldd $f4, TASK_THREAD_F4(a0)
vldd $f5, TASK_THREAD_F5(a0)
vldd $f6, TASK_THREAD_F6(a0)
vldd $f7, TASK_THREAD_F7(a0)
vldd $f8, TASK_THREAD_F8(a0)
vldd $f9, TASK_THREAD_F9(a0)
vldd $f10, TASK_THREAD_F10(a0)
vldd $f11, TASK_THREAD_F11(a0)
vldd $f12, TASK_THREAD_F12(a0)
vldd $f13, TASK_THREAD_F13(a0)
vldd $f14, TASK_THREAD_F14(a0)
vldd $f15, TASK_THREAD_F15(a0)
vldd $f16, TASK_THREAD_F16(a0)
vldd $f17, TASK_THREAD_F17(a0)
vldd $f18, TASK_THREAD_F18(a0)
vldd $f19, TASK_THREAD_F19(a0)
vldd $f20, TASK_THREAD_F20(a0)
vldd $f21, TASK_THREAD_F21(a0)
vldd $f22, TASK_THREAD_F22(a0)
vldd $f23, TASK_THREAD_F23(a0)
vldd $f24, TASK_THREAD_F24(a0)
vldd $f25, TASK_THREAD_F25(a0)
vldd $f26, TASK_THREAD_F26(a0)
vldd $f27, TASK_THREAD_F27(a0)
vldd $f28, TASK_THREAD_F28(a0)
vldd $f29, TASK_THREAD_F29(a0)
vldd $f30, TASK_THREAD_F30(a0)
ret
END(__fpstate_restore)
......@@ -34,13 +34,13 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
{ "r7", 8, offsetof(struct pt_regs, r7)},
{ "r8", 8, offsetof(struct pt_regs, r8)},
{ "r9", 8, -1 },
{ "r10", 8, -1 },
{ "r11", 8, -1 },
{ "r12", 8, -1 },
{ "r13", 8, -1 },
{ "r14", 8, -1 },
{ "r15", 8, -1 },
{ "r9", 8, offsetof(struct pt_regs, r9)},
{ "r10", 8, offsetof(struct pt_regs, r10)},
{ "r11", 8, offsetof(struct pt_regs, r11)},
{ "r12", 8, offsetof(struct pt_regs, r12)},
{ "r13", 8, offsetof(struct pt_regs, r13)},
{ "r14", 8, offsetof(struct pt_regs, r14)},
{ "r15", 8, offsetof(struct pt_regs, r15)},
{ "r16", 8, offsetof(struct pt_regs, r16)},
{ "r17", 8, offsetof(struct pt_regs, r17)},
......
......@@ -221,7 +221,7 @@ void __init common_init_pci(void)
struct pci_bus *bus;
unsigned int init_busnr;
int need_domain_info = 0;
int ret, iov_bus;
int ret;
unsigned long offset;
/* Scan all of the recorded PCI controllers. */
......@@ -257,20 +257,20 @@ void __init common_init_pci(void)
bus = hose->bus = bridge->bus;
hose->need_domain_info = need_domain_info;
while (pci_find_bus(pci_domain_nr(bus), last_bus))
last_bus++;
if (is_in_host())
iov_bus = chip_pcie_configure(hose);
last_bus += iov_bus;
last_bus = chip_pcie_configure(hose);
else
while (pci_find_bus(pci_domain_nr(bus), last_bus))
last_bus++;
hose->last_busno = hose->busn_space->end = last_bus - 1;
hose->last_busno = hose->busn_space->end = last_bus;
init_busnr = read_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS);
init_busnr &= ~(0xff << 16);
init_busnr |= (last_bus - 1) << 16;
init_busnr |= last_bus << 16;
write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr);
pci_bus_update_busn_res_end(bus, last_bus - 1);
pci_bus_update_busn_res_end(bus, last_bus);
last_bus++;
}
pcibios_claim_console_setup();
......@@ -358,12 +358,8 @@ asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned lon
return -EOPNOTSUPP;
}
/* Destroy an __iomem token. Not copied from lib/iomap.c. */
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
if (__is_mmio(addr))
iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
......@@ -402,7 +398,7 @@ int sw6_pcie_read_rc_cfg(struct pci_bus *bus, unsigned int devfn,
{
u32 data;
struct pci_controller *hose = bus->sysdata;
void __iomem *cfg_iobase = (void *)hose->rc_config_space_base;
void __iomem *cfg_iobase = hose->rc_config_space_base;
if (IS_ENABLED(CONFIG_PCI_DEBUG))
pr_debug("rc read addr:%px bus %d, devfn %#x, where %#x size=%d\t",
......@@ -553,9 +549,8 @@ static void __iomem *sw6_pcie_map_bus(struct pci_bus *bus,
return NULL;
relbus = (bus->number << 24) | (devfn << 16) | where;
relbus |= PCI_EP_CFG;
cfg_iobase = (void *)(hose->ep_config_space_base | relbus);
cfg_iobase = hose->ep_config_space_base + relbus;
if (IS_ENABLED(CONFIG_PCI_DEBUG))
pr_debug("addr:%px bus %d, devfn %d, where %d\n",
......@@ -605,15 +600,7 @@ sw64_init_host(unsigned long node, unsigned long index)
}
}
static void set_devint_wken(int node)
{
unsigned long val;
/* enable INTD wakeup */
val = 0x80;
sw64_io_write(node, DEVINT_WKEN, val);
sw64_io_write(node, DEVINTWK_INTEN, val);
}
void __weak set_devint_wken(int node) {}
void __init sw64_init_arch(void)
{
......@@ -656,6 +643,8 @@ void __init sw64_init_arch(void)
}
}
void __weak set_pcieport_service_irq(int node, int index) {}
static void __init sw64_init_intx(struct pci_controller *hose)
{
unsigned long int_conf, node, val_node;
......@@ -684,8 +673,7 @@ static void __init sw64_init_intx(struct pci_controller *hose)
if (sw64_chip_init->pci_init.set_intx)
sw64_chip_init->pci_init.set_intx(node, index, int_conf);
write_piu_ior0(node, index, PMEINTCONFIG, PME_ENABLE_INTD_CORE0);
write_piu_ior0(node, index, AERERRINTCONFIG, AER_ENABLE_INTD_CORE0);
set_pcieport_service_irq(node, index);
}
void __init sw64_init_irq(void)
......@@ -703,3 +691,16 @@ sw64_init_pci(void)
{
common_init_pci();
}
/*
 * pci_walk_bus() callback: cap DMA addressing of @pdev to 32 bits.
 * @data is unused.  Always returns 0 so the walk visits every device.
 */
static int setup_bus_dma_cb(struct pci_dev *pdev, void *data)
{
pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
return 0;
}
/*
 * Final fixup for the Zhaoxin ZX200 bridge (device 0x071f): every device
 * on its subordinate bus gets a 32-bit bus_dma_limit.
 */
static void fix_bus_dma_limit(struct pci_dev *dev)
{
	pci_walk_bus(dev->subordinate, setup_bus_dma_cb, NULL);
	pr_info("Set zx200 bus_dma_limit to 32-bit\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ZHAOXIN, 0x071f, fix_bus_dma_limit);
......@@ -6,6 +6,8 @@
#ifndef _SW64_KERNEL_PCI_IMPL_H
#define _SW64_KERNEL_PCI_IMPL_H
#include <asm/sw64io.h>
struct pci_dev;
struct pci_controller;
......
......@@ -6,6 +6,7 @@
*/
#include <linux/perf_event.h>
#include <asm/stacktrace.h>
/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
......@@ -243,14 +244,13 @@ static const struct sw64_perf_event *core3_map_cache_event(u64 config)
/*
* r0xx for counter0, r1yy for counter1.
* According to the datasheet, 00 <= xx <= 0F, 00 <= yy <= 37
* According to the datasheet, 00 <= xx <= 0F, 00 <= yy <= 3D
*/
static bool core3_raw_event_valid(u64 config)
{
if ((config >= (PC0_RAW_BASE + PC0_MIN) && config <= (PC0_RAW_BASE + PC0_MAX)) ||
(config >= (PC1_RAW_BASE + PC1_MIN) && config <= (PC1_RAW_BASE + PC1_MAX))) {
if ((config >= PC0_RAW_BASE && config <= (PC0_RAW_BASE + PC0_MAX)) ||
(config >= PC1_RAW_BASE && config <= (PC1_RAW_BASE + PC1_MAX)))
return true;
}
pr_info("sw64 pmu: invalid raw event config %#llx\n", config);
return false;
......@@ -297,31 +297,33 @@ static int sw64_perf_event_set_period(struct perf_event *event,
{
long left = local64_read(&hwc->period_left);
long period = hwc->sample_period;
int ret = 0;
int overflow = 0;
unsigned long value;
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
overflow = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
overflow = 1;
}
if (left > (long)sw64_pmu->pmc_max_period)
left = sw64_pmu->pmc_max_period;
local64_set(&hwc->prev_count, (unsigned long)(-left));
sw64_write_pmc(idx, (unsigned long)(sw64_pmu->pmc_max_period - left));
value = sw64_pmu->pmc_max_period - left;
local64_set(&hwc->prev_count, value);
sw64_write_pmc(idx, value);
perf_event_update_userpage(event);
return ret;
return overflow;
}
/*
......@@ -457,8 +459,8 @@ static void sw64_pmu_start(struct perf_event *event, int flags)
hwc->state = 0;
/* counting in all modes, for both counters */
wrperfmon(PERFMON_CMD_PM, 4);
/* counting in selected modes, for both counters */
wrperfmon(PERFMON_CMD_PM, hwc->config_base);
if (hwc->idx == PERFMON_PC0) {
wrperfmon(PERFMON_CMD_EVENT_PC0, hwc->event_base);
wrperfmon(PERFMON_CMD_ENABLE, PERFMON_ENABLE_ARGS_PC0);
......@@ -519,9 +521,12 @@ static int __hw_perf_event_init(struct perf_event *event)
const struct sw64_perf_event *event_type;
/* SW64 do not have per-counter usr/os/guest/host bits */
if (event->attr.exclude_user || event->attr.exclude_kernel ||
event->attr.exclude_hv || event->attr.exclude_idle ||
/*
* SW64 does not have per-counter usr/os/guest/host bits,
* we can distinguish exclude_user and exclude_kernel by
* sample mode.
*/
if (event->attr.exclude_hv || event->attr.exclude_idle ||
event->attr.exclude_host || event->attr.exclude_guest)
return -EINVAL;
......@@ -553,6 +558,13 @@ static int __hw_perf_event_init(struct perf_event *event)
hwc->event_base = attr->config & 0xff; /* event selector */
}
hwc->config_base = SW64_PERFCTRL_AM;
if (attr->exclude_user)
hwc->config_base = SW64_PERFCTRL_KM;
if (attr->exclude_kernel)
hwc->config_base = SW64_PERFCTRL_UM;
hwc->config = attr->config;
if (!is_sampling_event(event))
......@@ -687,6 +699,36 @@ bool valid_dy_addr(unsigned long addr)
return ret;
}
#ifdef CONFIG_FRAME_POINTER
/*
 * Record a user-space call chain by following the frame-pointer chain
 * saved on the user stack (r15 is the frame pointer on sw64).  Each
 * frame is read with __copy_from_user_inatomic() under
 * pagefault_disable(), so a faulting user pointer terminates the walk
 * instead of sleeping in interrupt context.  Only return addresses
 * that look like valid user text (valid_utext_addr/valid_dy_addr) are
 * stored into the callchain.
 */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	struct stack_frame frame;
	unsigned long __user *fp;
	int err;

	/* The interrupted PC is always the first entry. */
	perf_callchain_store(entry, regs->pc);

	fp = (unsigned long __user *)regs->r15;

	/* Walk until the buffer is full or fp leaves the user stack area. */
	while (entry->nr < entry->max_stack &&
	       (unsigned long)fp < current->mm->start_stack) {
		if (!access_ok(fp, sizeof(frame)))
			break;

		pagefault_disable();
		err = __copy_from_user_inatomic(&frame, fp, sizeof(frame));
		pagefault_enable();

		if (err)
			break;

		if (valid_utext_addr(frame.return_address) ||
		    valid_dy_addr(frame.return_address))
			perf_callchain_store(entry, frame.return_address);
		fp = (void __user *)frame.next_frame;
	}
}
#else /* !CONFIG_FRAME_POINTER */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
......@@ -699,30 +741,38 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
while (entry->nr < entry->max_stack && usp < current->mm->start_stack) {
if (!access_ok(usp, 8))
break;
pagefault_disable();
err = __get_user(user_addr, (unsigned long *)usp);
pagefault_enable();
if (err)
break;
if (valid_utext_addr(user_addr) || valid_dy_addr(user_addr))
perf_callchain_store(entry, user_addr);
usp = usp + 8;
}
}
#endif/* CONFIG_FRAME_POINTER */
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
/*
* Gets called by walk_stackframe() for every stackframe. This will be called
* whist unwinding the stackframe and is like a subroutine return so we use
* the PC.
*/
static int callchain_trace(unsigned long pc, void *data)
{
unsigned long *sp = (unsigned long *)current_thread_info()->pcb.ksp;
unsigned long addr;
struct perf_callchain_entry_ctx *entry = data;
perf_callchain_store(entry, regs->pc);
perf_callchain_store(entry, pc);
return 0;
}
while (!kstack_end(sp) && entry->nr < entry->max_stack) {
addr = *sp++;
if (__kernel_text_address(addr))
perf_callchain_store(entry, addr);
}
/*
 * Record a kernel call chain by walking the kernel stack frames from
 * @regs, storing each PC via the callchain_trace() callback.
 */
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	walk_stackframe(NULL, regs, callchain_trace, entry);
}
/*
......
......@@ -11,6 +11,7 @@
#include <linux/random.h>
#include <asm/fpu.h>
#include <asm/switch_to.h>
#include "proto.h"
......@@ -109,7 +110,7 @@ void
show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
dik_show_regs(regs, NULL);
dik_show_regs(regs);
}
/*
......@@ -143,6 +144,13 @@ release_thread(struct task_struct *dead_task)
{
}
/*
 * Arch hook for fork: flush @src's live FPU contents into its thread
 * struct first, so the structure copy below hands the child up-to-date
 * FP state.  Always succeeds.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	fpstate_save(src);
	*dst = *src;
	return 0;
}
/*
* Copy architecture-specific thread state
*/
......@@ -158,19 +166,17 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
struct thread_info *childti = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
struct pt_regs *regs = current_pt_regs();
struct switch_stack *childstack, *stack;
childstack = ((struct switch_stack *) childregs) - 1;
childti->pcb.ksp = (unsigned long) childstack;
childti->pcb.ksp = (unsigned long) childregs;
childti->pcb.flags = 7; /* set FEN, clear everything else */
p->thread.sp = (unsigned long) childregs;
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
memset(childstack, 0,
sizeof(struct switch_stack) + sizeof(struct pt_regs));
childstack->r26 = (unsigned long) ret_from_kernel_thread;
childstack->r9 = usp; /* function */
childstack->r10 = kthread_arg;
memset(childregs, 0, sizeof(struct pt_regs));
p->thread.ra = (unsigned long) ret_from_kernel_thread;
p->thread.s[0] = usp; /* function */
p->thread.s[1] = kthread_arg;
childti->pcb.usp = 0;
return 0;
}
......@@ -189,136 +195,36 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
*childregs = *regs;
childregs->r0 = 0;
childregs->r19 = 0;
stack = ((struct switch_stack *) regs) - 1;
*childstack = *stack;
p->thread = current->thread;
childstack->r26 = (unsigned long) ret_from_fork;
p->thread.ra = (unsigned long) ret_from_fork;
return 0;
}
/*
* Fill in the user structure for a ELF core dump.
* @regs: should be signal_pt_regs() or task_pt_reg(task)
*/
void
dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *regs)
{
/* switch stack follows right below pt_regs: */
struct switch_stack *sw = ((struct switch_stack *) pt) - 1;
dest[0] = pt->r0;
dest[1] = pt->r1;
dest[2] = pt->r2;
dest[3] = pt->r3;
dest[4] = pt->r4;
dest[5] = pt->r5;
dest[6] = pt->r6;
dest[7] = pt->r7;
dest[8] = pt->r8;
dest[9] = sw->r9;
dest[10] = sw->r10;
dest[11] = sw->r11;
dest[12] = sw->r12;
dest[13] = sw->r13;
dest[14] = sw->r14;
dest[15] = sw->r15;
dest[16] = pt->r16;
dest[17] = pt->r17;
dest[18] = pt->r18;
dest[19] = pt->r19;
dest[20] = pt->r20;
dest[21] = pt->r21;
dest[22] = pt->r22;
dest[23] = pt->r23;
dest[24] = pt->r24;
dest[25] = pt->r25;
dest[26] = pt->r26;
dest[27] = pt->r27;
dest[28] = pt->r28;
dest[29] = pt->gp;
dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
dest[31] = pt->pc;
int i;
struct thread_info *ti;
/* Once upon a time this was the PS value. Which is stupid
* since that is always 8 for usermode. Usurped for the more
* useful value of the thread's UNIQUE field.
*/
dest[32] = ti->pcb.unique;
}
EXPORT_SYMBOL(dump_elf_thread);
ti = (void *)((__u64)regs & ~(THREAD_SIZE - 1));
int
dump_elf_task(elf_greg_t *dest, struct task_struct *task)
{
dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task));
return 1;
for (i = 0; i < 30; i++)
dest[i] = *(__u64 *)((void *)regs + regoffsets[i]);
dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
dest[31] = regs->pc;
dest[32] = ti->pcb.unique;
}
EXPORT_SYMBOL(dump_elf_task);
EXPORT_SYMBOL(sw64_elf_core_copy_regs);
int
dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
memcpy(dest, &task->thread.ctx_fp, 32 * 8);
memcpy(fpu, &current->thread.fpstate, sizeof(*fpu));
return 1;
}
EXPORT_SYMBOL(dump_elf_task_fp);
/*
* Return saved PC of a blocked thread. This assumes the frame
* pointer is the 6th saved long on the kernel stack and that the
* saved return address is the first long in the frame. This all
* holds provided the thread blocked through a call to schedule() ($15
* is the frame pointer in schedule() and $15 is saved at offset 48 by
* entry.S:do_switch_stack).
*
* Under heavy swap load I've seen this lose in an ugly way. So do
* some extra sanity checking on the ranges we expect these pointers
* to be in so that we can fail gracefully. This is just for ps after
* all. -- r~
*/
/*
 * Return the saved PC of a blocked task (see the comment above for the
 * stack-layout assumptions).  Both ksp and the recovered frame pointer
 * are bounds-checked against the task's stack page so a corrupted stack
 * yields 0 instead of a wild read.
 * NOTE(review): the 16*1024 bound hard-codes a 16KB kernel stack —
 * confirm this matches THREAD_SIZE for this architecture.
 */
unsigned long
thread_saved_pc(struct task_struct *t)
{
	unsigned long base = (unsigned long)task_stack_page(t);
	unsigned long fp, sp = task_thread_info(t)->pcb.ksp;

	if (sp > base && sp+6*8 < base + 16*1024) {
		/* 7th long on the kernel stack: saved frame pointer */
		fp = ((unsigned long *)sp)[6];
		if (fp > sp && fp < base + 16*1024)
			/* first long in the frame: saved return address */
			return *(unsigned long *)fp;
	}
	return 0;
}
/*
 * Return the "wait channel" of a sleeping task: the first PC in its
 * backtrace outside the scheduler.  Returns 0 for current or runnable
 * tasks, and falls back to the raw saved PC if it is not inside a
 * scheduler function.
 */
unsigned long
get_wchan(struct task_struct *p)
{
	unsigned long schedule_frame;
	unsigned long pc, base, sp;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	/*
	 * This one depends on the frame size of schedule().  Do a
	 * "disass schedule" in gdb to find the frame size.  Also, the
	 * code assumes that sleep_on() follows immediately after
	 * interruptible_sleep_on() and that add_timer() follows
	 * immediately after interruptible_sleep().  Ugly, isn't it?
	 * Maybe adding a wchan field to task_struct would be better,
	 * after all...
	 */

	pc = thread_saved_pc(p);
	if (in_sched_functions(pc)) {
		/* Step one frame past the scheduler to find the caller. */
		base = (unsigned long)task_stack_page(p);
		sp = task_thread_info(p)->pcb.ksp;
		schedule_frame = ((unsigned long *)sp)[6];
		if (schedule_frame > sp && schedule_frame < base + 16*1024)
			return ((unsigned long *)schedule_frame)[12];
	}
	return pc;
}
EXPORT_SYMBOL(dump_fpu);
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
......
......@@ -5,14 +5,15 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <asm/pgtable.h>
#include <asm/sw64io.h>
/* ptrace.c */
extern int ptrace_set_bpt(struct task_struct *child);
extern int ptrace_cancel_bpt(struct task_struct *child);
/* traps.c */
extern void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15);
extern void die_if_kernel(char *str, struct pt_regs *regs, long err, unsigned long *r9_15);
extern void dik_show_regs(struct pt_regs *regs);
extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* timer.c */
extern void setup_timer(void);
......
......@@ -7,8 +7,12 @@
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/sched/task_stack.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include "proto.h"
......@@ -33,10 +37,6 @@
* | frame generated by SAVE_ALL | |
* | | v
* +================================+
* | | ^
* | frame saved by do_switch_stack | | struct switch_stack
* | | v
* +================================+
*/
/*
......@@ -56,27 +56,18 @@ enum {
REG_GP = 29
};
#define PT_REG(reg) \
(PAGE_SIZE * 2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
#define SW_REG(reg) \
(PAGE_SIZE * 2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
+ offsetof(struct switch_stack, reg))
#define FP_REG(fp_regno, vector_regno) \
(fp_regno * 32 + vector_regno * 8)
static int regoff[] = {
PT_REG(r0), PT_REG(r1), PT_REG(r2), PT_REG(r3),
PT_REG(r4), PT_REG(r5), PT_REG(r6), PT_REG(r7),
PT_REG(r8), SW_REG(r9), SW_REG(r10), SW_REG(r11),
SW_REG(r12), SW_REG(r13), SW_REG(r14), SW_REG(r15),
PT_REG(r16), PT_REG(r17), PT_REG(r18), PT_REG(r19),
PT_REG(r20), PT_REG(r21), PT_REG(r22), PT_REG(r23),
PT_REG(r24), PT_REG(r25), PT_REG(r26), PT_REG(r27),
PT_REG(r28), PT_REG(gp), -1, -1
/*
 * Byte offsets of r0..r28 and gp within struct pt_regs, indexed by
 * register number.  Slots 30 and 31 (usp and pc) are handled separately
 * by the users of this table and are left 0 here.
 */
#define R(x) ((size_t) &((struct pt_regs *)0)->x)

short regoffsets[32] = {
	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
	R(r9), R(r10), R(r11), R(r12), R(r13), R(r14), R(r15),
	R(r16), R(r17), R(r18),
	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
	R(r27), R(r28), R(gp), 0, 0
};

#undef R
#define PCB_OFF(var) offsetof(struct pcb_struct, var)
static int pcboff[] = {
......@@ -98,8 +89,8 @@ static unsigned long zero;
static unsigned long *
get_reg_addr(struct task_struct *task, unsigned long regno)
{
unsigned long *addr;
int fp_regno, vector_regno;
void *addr;
int fno, vno;
switch (regno) {
case USP:
......@@ -112,12 +103,11 @@ get_reg_addr(struct task_struct *task, unsigned long regno)
addr = (void *)task_thread_info(task) + pcboff[regno];
break;
case REG_BASE ... REG_END:
addr = (void *)task_thread_info(task) + regoff[regno];
addr = (void *)task_pt_regs(task) + regoffsets[regno];
break;
case FPREG_BASE ... FPREG_END:
fp_regno = regno - FPREG_BASE;
vector_regno = 0;
addr = (void *)((unsigned long)&task->thread.ctx_fp + FP_REG(fp_regno, vector_regno));
fno = regno - FPREG_BASE;
addr = &task->thread.fpstate.fp[fno].v[0];
break;
case VECREG_BASE ... VECREG_END:
/*
......@@ -128,15 +118,15 @@ get_reg_addr(struct task_struct *task, unsigned long regno)
addr = &zero;
break;
}
fp_regno = (regno - VECREG_BASE) & 0x1f;
vector_regno = 1 + ((regno - VECREG_BASE) >> 5);
addr = (void *)((unsigned long)&task->thread.ctx_fp + FP_REG(fp_regno, vector_regno));
fno = (regno - VECREG_BASE) & 0x1f;
vno = 1 + ((regno - VECREG_BASE) >> 5);
addr = &task->thread.fpstate.fp[fno].v[vno];
break;
case FPCR:
addr = (void *)&task->thread.fpcr;
addr = &task->thread.fpstate.fpcr;
break;
case PC:
addr = (void *)task_thread_info(task) + PT_REG(pc);
addr = (void *)task_pt_regs(task) + PT_REGS_PC;
break;
default:
addr = &zero;
......@@ -279,114 +269,103 @@ void ptrace_disable(struct task_struct *child)
user_disable_single_step(child);
}
int ptrace_getregs(struct task_struct *child, __s64 __user *data)
static int gpr_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
int ret, retval = 0;
int i;
unsigned long regval;
struct pt_regs *regs;
struct user_pt_regs uregs;
int i, ret;
if (!access_ok(data, sizeof(long) * 33))
return -EIO;
regs = task_pt_regs(target);
for (i = 0; i < 30; i++)
uregs.regs[i] = *(__u64 *)((void *)regs + regoffsets[i]);
uregs.regs[30] = task_thread_info(target)->pcb.usp;
uregs.pc = regs->pc;
uregs.pstate = regs->ps;
ret = membuf_write(&to, &uregs, sizeof(uregs));
/* r0-r15 */
for (i = 0; i < 16; i++) {
regval = get_reg(child, i);
retval |= __put_user((long)regval, data + i);
}
/* r19-r28 */
for (i = 19; i < 29; i++) {
regval = get_reg(child, i);
retval |= __put_user((long)regval, data + i - 3);
}
/*SP, PS ,PC,GP*/
retval |= __put_user((long)(get_reg(child, REG_SP)), data + EF_SP);
retval |= __put_user((long)(get_reg(child, REG_PS)), data + EF_PS);
retval |= __put_user((long)(get_reg(child, REG_PC)), data + EF_PC);
retval |= __put_user((long)(get_reg(child, REG_GP)), data + EF_GP);
/* r16-r18 */
retval |= __put_user((long)(get_reg(child, 16)), data + EF_A0);
retval |= __put_user((long)(get_reg(child, 17)), data + EF_A1);
retval |= __put_user((long)(get_reg(child, 18)), data + EF_A2);
ret = retval ? -EIO : 0;
return ret;
}
int ptrace_setregs(struct task_struct *child, __s64 __user *data)
static int gpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret, retval = 0;
int i;
unsigned long regval;
struct pt_regs *regs;
struct user_pt_regs uregs;
int i, ret;
if (!access_ok(data, sizeof(long) * 33))
return -EIO;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&uregs, 0, sizeof(uregs));
if (ret)
return ret;
regs = task_pt_regs(target);
for (i = 0; i < 30; i++)
*(__u64 *)((void *)regs + regoffsets[i]) = uregs.regs[i];
task_thread_info(target)->pcb.usp = uregs.regs[30];
regs->pc = uregs.pc;
regs->ps = uregs.pstate;
/* r0-r15 */
for (i = 0; i < 16; i++) {
retval |= __get_user(regval, data + i);
ret = put_reg(child, i, regval);
}
/* r19-r28 */
for (i = 19; i < 29; i++) {
retval |= __get_user(regval, data + i - 3);
ret = put_reg(child, i, regval);
}
/*SP, PS ,PC,GP*/
retval |= __get_user(regval, data + EF_SP);
ret = put_reg(child, REG_SP, regval);
retval |= __get_user(regval, data + EF_PS);
ret = put_reg(child, REG_PS, regval);
retval |= __get_user(regval, data + EF_PC);
ret = put_reg(child, REG_PC, regval);
retval |= __get_user(regval, data + EF_GP);
ret = put_reg(child, REG_GP, regval);
/* r16-r18 */
retval |= __get_user(regval, data + EF_A0);
ret = put_reg(child, 16, regval);
retval |= __get_user(regval, data + EF_A1);
ret = put_reg(child, 17, regval);
retval |= __get_user(regval, data + EF_A2);
ret = put_reg(child, 18, regval);
ret = retval ? -EIO : 0;
return 0;
}
int ptrace_getfpregs(struct task_struct *child, __s64 __user *data)
static int fpr_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
int ret, retval = 0;
int i;
unsigned long regval;
if (!access_ok(data, sizeof(long) * 32))
return -EIO;
/* fp0-fp31 */
for (i = 0; i < 32; i++) {
regval = get_reg(child, REG_F0 + i);
retval |= __put_user((long)regval, data + i);
}
ret = retval ? -EIO : 0;
return 0;
return membuf_write(&to, &target->thread.fpstate,
sizeof(struct user_fpsimd_state));
}
int ptrace_setfpregs(struct task_struct *child, __s64 __user *data)
/*
 * Write back the NT_PRFPREG regset: copy the user-supplied data
 * straight into @target's saved FPU/SIMD state.  Returns 0 on success
 * or a negative errno from user_regset_copyin().
 *
 * The previously declared locals (ret/retval/i/regval) were never used
 * and have been removed.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpstate, 0,
				  sizeof(struct user_fpsimd_state));
}
if (!access_ok(data, sizeof(long) * 32))
return -EIO;
enum sw64_regset {
REGSET_GPR,
REGSET_FPR,
};
/* fp0-fp31 */
for (i = 0; i < 32; i++) {
retval |= __get_user(regval, data + i);
ret = put_reg(child, REG_F0 + i, regval);
}
/*
 * Regsets exported through the regset core for ptrace and coredumps:
 * general-purpose registers (NT_PRSTATUS) and FPU/SIMD state
 * (NT_PRFPREG).
 */
static const struct user_regset sw64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t),
		.align = sizeof(elf_greg_t),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = fpr_get,
		.set = fpr_set
	},
};
return ret;
/* The single regset view describing sw64 tasks to the regset core. */
static const struct user_regset_view user_sw64_view = {
	.name = "sw64", .e_machine = EM_SW64,
	.regsets = sw64_regsets, .n = ARRAY_SIZE(sw64_regsets)
};
/* One view fits every task on this architecture. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sw64_view;
}
long arch_ptrace(struct task_struct *child, long request,
......@@ -395,7 +374,6 @@ long arch_ptrace(struct task_struct *child, long request,
unsigned long tmp;
size_t copied;
long ret;
void __user *datavp = (void __user *) data;
switch (request) {
/* When I and D space are separate, these will need to be fixed. */
......@@ -425,18 +403,6 @@ long arch_ptrace(struct task_struct *child, long request,
case PTRACE_POKEUSR: /* write the specified register */
ret = put_reg(child, addr, data);
break;
case PTRACE_GETREGS:
ret = ptrace_getregs(child, datavp);
break;
case PTRACE_SETREGS:
ret = ptrace_setregs(child, datavp);
break;
case PTRACE_GETFPREGS:
ret = ptrace_getfpregs(child, datavp);
break;
case PTRACE_SETFPREGS:
ret = ptrace_setfpregs(child, datavp);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
......@@ -521,7 +487,7 @@ int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_r
case MMCSR__DA_MATCH:
case MMCSR__DV_MATCH:
case MMCSR__DAV_MATCH:
dik_show_regs(regs, (unsigned long *)regs-15);
dik_show_regs(regs);
if (!(current->ptrace & PT_PTRACED)) {
printk(" pid %d %s not be ptraced, return\n", current->pid, current->comm);
......@@ -628,6 +594,13 @@ static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(r6),
REG_OFFSET_NAME(r7),
REG_OFFSET_NAME(r8),
REG_OFFSET_NAME(r9),
REG_OFFSET_NAME(r10),
REG_OFFSET_NAME(r11),
REG_OFFSET_NAME(r12),
REG_OFFSET_NAME(r13),
REG_OFFSET_NAME(r14),
REG_OFFSET_NAME(r15),
REG_OFFSET_NAME(r19),
REG_OFFSET_NAME(r20),
REG_OFFSET_NAME(r21),
......
......@@ -560,16 +560,20 @@ static void __init setup_machine_fdt(void)
#ifdef CONFIG_USE_OF
void *dt_virt;
const char *name;
unsigned long phys_addr;
/* Give a chance to select kernel builtin DTB firstly */
if (IS_ENABLED(CONFIG_SW64_BUILTIN_DTB))
dt_virt = (void *)__dtb_start;
else
else {
dt_virt = (void *)sunway_boot_params->dtb_start;
if (virt_to_phys(dt_virt) < virt_to_phys(__bss_stop)) {
pr_emerg("BUG: DTB has been corrupted by kernel image!\n");
while (true)
cpu_relax();
}
}
phys_addr = __phys_addr((unsigned long)dt_virt);
if (!phys_addr_valid(phys_addr) ||
if (!phys_addr_valid(virt_to_phys(dt_virt)) ||
!early_init_dt_scan(dt_virt)) {
pr_crit("\n"
"Error: invalid device tree blob at virtual address %px\n"
......@@ -901,7 +905,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
"physical id\t: %d\n"
"bogomips\t: %lu.%02lu\n",
cpu_freq, cpu_data[i].tcache.size >> 10,
cpu_to_rcid(i),
cpu_topology[i].package_id,
loops_per_jiffy / (500000/HZ),
(loops_per_jiffy / (5000/HZ)) % 100);
......@@ -972,7 +976,7 @@ static int __init debugfs_sw64(void)
{
struct dentry *d;
d = debugfs_create_dir("sw_64", NULL);
d = debugfs_create_dir("sw64", NULL);
if (!d)
return -ENOMEM;
sw64_debugfs_dir = d;
......@@ -1020,8 +1024,7 @@ static int __init sw64_kvm_pool_init(void)
end_page = pfn_to_page((kvm_mem_base + kvm_mem_size - 1) >> PAGE_SHIFT);
p = base_page;
while (page_ref_count(p) == 0 &&
(unsigned long)p <= (unsigned long)end_page) {
while (p <= end_page && page_ref_count(p) == 0) {
set_page_count(p, 1);
page_mapcount_reset(p);
SetPageReserved(p);
......
......@@ -14,6 +14,7 @@
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include "proto.h"
......@@ -22,8 +23,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
asmlinkage void ret_from_sys_call(void);
SYSCALL_DEFINE2(odd_sigprocmask, int, how, unsigned long, newmask)
{
sigset_t oldmask;
......@@ -64,13 +63,10 @@ static long
restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
unsigned long usp;
struct switch_stack *sw = (struct switch_stack *)regs - 1;
long err = __get_user(regs->pc, &sc->sc_pc);
current->restart_block.fn = do_no_restart_syscall;
sw->r26 = (unsigned long) ret_from_sys_call;
err |= __get_user(regs->r0, sc->sc_regs+0);
err |= __get_user(regs->r1, sc->sc_regs+1);
err |= __get_user(regs->r2, sc->sc_regs+2);
......@@ -80,13 +76,13 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
err |= __get_user(regs->r6, sc->sc_regs+6);
err |= __get_user(regs->r7, sc->sc_regs+7);
err |= __get_user(regs->r8, sc->sc_regs+8);
err |= __get_user(sw->r9, sc->sc_regs+9);
err |= __get_user(sw->r10, sc->sc_regs+10);
err |= __get_user(sw->r11, sc->sc_regs+11);
err |= __get_user(sw->r12, sc->sc_regs+12);
err |= __get_user(sw->r13, sc->sc_regs+13);
err |= __get_user(sw->r14, sc->sc_regs+14);
err |= __get_user(sw->r15, sc->sc_regs+15);
err |= __get_user(regs->r9, sc->sc_regs+9);
err |= __get_user(regs->r10, sc->sc_regs+10);
err |= __get_user(regs->r11, sc->sc_regs+11);
err |= __get_user(regs->r12, sc->sc_regs+12);
err |= __get_user(regs->r13, sc->sc_regs+13);
err |= __get_user(regs->r14, sc->sc_regs+14);
err |= __get_user(regs->r15, sc->sc_regs+15);
err |= __get_user(regs->r16, sc->sc_regs+16);
err |= __get_user(regs->r17, sc->sc_regs+17);
err |= __get_user(regs->r18, sc->sc_regs+18);
......@@ -104,9 +100,12 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
err |= __get_user(usp, sc->sc_regs+30);
wrusp(usp);
/* simd-fp */
err |= __copy_from_user(&current->thread.ctx_fp,
&sc->sc_fpregs, sizeof(struct context_fpregs));
err |= __get_user(current->thread.fpcr, &sc->sc_fpcr);
err |= __copy_from_user(&current->thread.fpstate, &sc->sc_fpregs,
offsetof(struct user_fpsimd_state, fpcr));
err |= __get_user(current->thread.fpstate.fpcr, &sc->sc_fpcr);
if (likely(!err))
__fpstate_restore(current);
return err;
}
......@@ -191,7 +190,6 @@ static long
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask, unsigned long sp)
{
struct switch_stack *sw = (struct switch_stack *)regs - 1;
long err = 0;
err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack);
......@@ -208,13 +206,13 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
err |= __put_user(regs->r6, sc->sc_regs+6);
err |= __put_user(regs->r7, sc->sc_regs+7);
err |= __put_user(regs->r8, sc->sc_regs+8);
err |= __put_user(sw->r9, sc->sc_regs+9);
err |= __put_user(sw->r10, sc->sc_regs+10);
err |= __put_user(sw->r11, sc->sc_regs+11);
err |= __put_user(sw->r12, sc->sc_regs+12);
err |= __put_user(sw->r13, sc->sc_regs+13);
err |= __put_user(sw->r14, sc->sc_regs+14);
err |= __put_user(sw->r15, sc->sc_regs+15);
err |= __put_user(regs->r9, sc->sc_regs+9);
err |= __put_user(regs->r10, sc->sc_regs+10);
err |= __put_user(regs->r11, sc->sc_regs+11);
err |= __put_user(regs->r12, sc->sc_regs+12);
err |= __put_user(regs->r13, sc->sc_regs+13);
err |= __put_user(regs->r14, sc->sc_regs+14);
err |= __put_user(regs->r15, sc->sc_regs+15);
err |= __put_user(regs->r16, sc->sc_regs+16);
err |= __put_user(regs->r17, sc->sc_regs+17);
err |= __put_user(regs->r18, sc->sc_regs+18);
......@@ -232,9 +230,10 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
err |= __put_user(sp, sc->sc_regs+30);
err |= __put_user(0, sc->sc_regs+31);
/* simd-fp */
err |= __copy_to_user(&sc->sc_fpregs,
&current->thread.ctx_fp, sizeof(struct context_fpregs));
err |= __put_user(current->thread.fpcr, &sc->sc_fpcr);
__fpstate_save(current);
err |= __copy_to_user(&sc->sc_fpregs, &current->thread.fpstate,
offsetof(struct user_fpsimd_state, fpcr));
err |= __put_user(current->thread.fpstate.fpcr, &sc->sc_fpcr);
err |= __put_user(regs->trap_a0, &sc->sc_traparg_a0);
err |= __put_user(regs->trap_a1, &sc->sc_traparg_a1);
......
......@@ -8,38 +8,207 @@
#include <linux/stacktrace.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/debug.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <linux/kallsyms.h>
#include <asm/stacktrace.h>
/*
* Save stack-backtrace addresses into a stack_trace buffer.
* sw_64 PCS assigns the frame pointer to r15.
*
* A simple function prologue looks like this:
* ldi sp,-xx(sp)
* stl ra,0(sp)
* stl fp,8(sp)
* mov sp,fp
*
* A simple function epilogue looks like this:
* mov fp,sp
* ldl ra,0(sp)
* ldl fp,8(sp)
* ldi sp,+xx(sp)
*/
void save_stack_trace(struct stack_trace *trace)
#ifdef CONFIG_FRAME_POINTER
/*
 * Unwind one stack frame by following the frame-pointer chain.  Per the
 * prologue sketch above, the return address lives at fp+0 and the
 * previous frame pointer at fp+8.  Returns 0 on success, or -EINVAL
 * when the walk must stop: misaligned fp, fp outside an accessible
 * stack, or a terminal all-NULL frame.
 */
int unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
	unsigned long fp = frame->fp;

	/* Frame pointers must be 8-byte aligned. */
	if (fp & 0x7)
		return -EINVAL;

	if (!tsk)
		tsk = current;

	if (!on_accessible_stack(tsk, fp, NULL))
		return -EINVAL;

	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));

	/*
	 * Frames created upon entry from user have NULL FP and PC values, so
	 * don't bother reporting these. Frames created by __noreturn functions
	 * might have a valid FP even if PC is bogus, so only terminate where
	 * both are NULL.
	 */
	if (!frame->fp && !frame->pc)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
EXPORT_SYMBOL_GPL(unwind_frame);
void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs,
int (*fn)(unsigned long, void *), void *data)
{
unsigned long pc, fp;
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
struct stackframe frame;
if (regs) {
pc = regs->pc;
fp = regs->r15;
} else if (tsk == current || tsk == NULL) {
fp = (unsigned long)__builtin_frame_address(0);
pc = (unsigned long)walk_stackframe;
} else {
fp = tsk->thread.s[6];
pc = tsk->thread.ra;
}
if (!__kernel_text_address(pc) || fn(pc, data))
return;
frame.pc = pc;
frame.fp = fp;
while (1) {
int ret;
ret = unwind_frame(tsk, &frame);
if (ret < 0)
break;
if (fn(frame.pc, data))
break;
}
}
EXPORT_SYMBOL_GPL(walk_stackframe);
#else /* !CONFIG_FRAME_POINTER */
void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs,
int (*fn)(unsigned long, void *), void *data)
{
unsigned long *sp = (unsigned long *)task_thread_info(tsk)->pcb.ksp;
unsigned long addr;
WARN_ON(trace->nr_entries || !trace->max_entries);
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr) &&
!in_sched_functions(addr)) {
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = addr;
if (trace->nr_entries >= trace->max_entries)
break;
}
unsigned long *ksp;
unsigned long sp, pc;
if (regs) {
sp = (unsigned long)(regs+1);
pc = regs->pc;
} else if (tsk == current || tsk == NULL) {
register unsigned long current_sp __asm__ ("$30");
sp = current_sp;
pc = (unsigned long)walk_stackframe;
} else {
sp = tsk->thread.sp;
pc = tsk->thread.ra;
}
ksp = (unsigned long *)sp;
while (!kstack_end(ksp)) {
if (__kernel_text_address(pc) && fn(pc, data))
break;
pc = (*ksp++) - 0x4;
}
}
EXPORT_SYMBOL_GPL(walk_stackframe);
#endif/* CONFIG_FRAME_POINTER */
/*
 * walk_stackframe() callback for show_stack(): print one symbolized
 * address at the log level passed through @data.  Always returns 0 to
 * keep the walk going.
 */
static int print_address_trace(unsigned long pc, void *data)
{
	print_ip_sym((const char *)data, pc);
	return 0;
}
/*
 * Dump a kernel backtrace for @task (or the current task when NULL).
 * The original printed the header with pr_info(), ignoring the
 * caller-supplied @loglvl; honour it instead, matching the generic
 * show_stack() convention (the per-line callback already uses it).
 */
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	printk("%sTrace:\n", loglvl);
	walk_stackframe(task, NULL, print_address_trace, (void *)loglvl);
}
#ifdef CONFIG_STACKTRACE
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
/* Argument bundle handed to the save_trace() callback. */
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int nosched;	/* non-zero: drop PCs inside scheduler functions */
};
/*
 * walk_stackframe() callback: record one PC into the stack_trace
 * buffer, honouring the skip count and (optionally) dropping scheduler
 * internals.  Returns non-zero to stop the walk once the buffer is
 * full.
 */
int save_trace(unsigned long pc, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;

	if (data->nosched && in_sched_functions(pc))
		return 0;
	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = pc;

	return (trace->nr_entries >= trace->max_entries);
}
/*
 * Common helper for the save_stack_trace*() entry points: walk @tsk's
 * stack into @trace, then terminate the entry list with ULONG_MAX if
 * there is room left.
 */
static void __save_stack_trace(struct task_struct *tsk,
		struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;

	data.trace = trace;
	data.nosched = nosched;
	walk_stackframe(tsk, NULL, save_trace, &data);

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
/* Capture @tsk's backtrace, skipping scheduler-internal frames. */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
/* Capture the current task's backtrace, including scheduler frames. */
void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
/*
 * walk_stackframe() callback for get_wchan(): capture the first PC that
 * is outside the scheduler and stop the walk.
 *
 * Return the stop flag explicitly as "*p != 0" rather than "*p": the
 * implicit 64-bit-to-int truncation in the original could yield 0 for
 * an address whose low 32 bits happen to be zero, making the walk
 * continue past (and overwrite) a valid result.
 */
static int save_pc(unsigned long pc, void *data)
{
	unsigned long *p = data;

	*p = 0;
	if (!in_sched_functions(pc))
		*p = pc;

	return *p != 0;
}
/*
 * Return the "wait channel" of a sleeping task: the first PC in its
 * backtrace outside the scheduler.  Returns 0 for NULL, current, or
 * runnable tasks, and 0 when no such PC is found.
 *
 * @pc must be pre-initialized: walk_stackframe() bails out before
 * invoking the callback when the task's saved PC is not a kernel text
 * address, in which case the original returned uninitialized stack
 * garbage.
 */
unsigned long get_wchan(struct task_struct *tsk)
{
	unsigned long pc = 0;

	if (!tsk || tsk == current || tsk->state == TASK_RUNNING)
		return 0;
	walk_stackframe(tsk, NULL, save_pc, &pc);
	return pc;
}
......@@ -73,7 +73,7 @@
63 common getpgrp sys_getpgrp
#64 is unused
#65 is unused
66 common vfork sw64_vfork
66 common vfork sys_vfork
67 common stat sys_newstat
68 common lstat sys_newlstat
#69 is unused
......@@ -289,7 +289,7 @@
279 common fsmount sys_fsmount
280 common fspick sys_fspick
281 common pidfd_open sys_pidfd_open
282 common clone3 sw64_clone3
282 common clone3 sys_clone3
283 common close_range sys_close_range
284 common openat2 sys_openat2
285 common pidfd_getfd sys_pidfd_getfd
......@@ -319,7 +319,7 @@
309 common get_kernel_syms sys_ni_syscall
310 common syslog sys_syslog
311 common reboot sys_reboot
312 common clone sw64_clone
312 common clone sys_clone
313 common uselib sys_uselib
314 common mlock sys_mlock
315 common munlock sys_munlock
......
......@@ -12,18 +12,24 @@
#include <linux/extable.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/kexec.h>
#include <linux/kallsyms.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/debug.h>
#include <asm/gentrap.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#include <asm/uprobes.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include "proto.h"
void
dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
void dik_show_regs(struct pt_regs *regs)
{
printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
regs->pc, regs->r26, regs->ps, print_tainted());
......@@ -36,13 +42,12 @@ dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
printk("t5 = %016lx t6 = %016lx t7 = %016lx\n",
regs->r6, regs->r7, regs->r8);
if (r9_15) {
printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
r9_15[9], r9_15[10], r9_15[11]);
printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
r9_15[12], r9_15[13], r9_15[14]);
printk("s6 = %016lx\n", r9_15[15]);
}
printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
regs->r9, regs->r10, regs->r11);
printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
regs->r12, regs->r13, regs->r14);
printk("s6 = %016lx\n",
regs->r15);
printk("a0 = %016lx a1 = %016lx a2 = %016lx\n",
regs->r16, regs->r17, regs->r18);
......@@ -70,55 +75,7 @@ dik_show_code(unsigned int *pc)
printk("\n");
}
/*
 * Print a crude backtrace: scan raw stack words upward from @sp within
 * the current 8 KiB stack region and print every value that looks like
 * a kernel text address, capped at ~40 entries.
 */
static void
dik_show_trace(unsigned long *sp, const char *loglvl)
{
	long i = 0;
	unsigned long tmp;

	printk("%sTrace:\n", loglvl);
	/* Stop at the aligned top of the stack region. */
	while (0x1ff8 & (unsigned long)sp) {
		tmp = *sp;
		sp++;
		if (!__kernel_text_address(tmp))
			continue;
		printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
		/*
		 * i must be incremented here: previously it was tested but
		 * never advanced, so the 40-entry cap was dead code and the
		 * loop could spam one line per plausible stack word all the
		 * way to the top of the stack page.
		 */
		if (i++ > 40) {
			printk("%s ...", loglvl);
			break;
		}
	}
	printk("\n");
}
/* Number of raw stack words dumped before the symbolic backtrace. */
static int kstack_depth_to_print = 24;

/*
 * show_stack() - dump raw stack words, then a symbolic backtrace.
 * @task:   unused; the dump is always taken from the current stack.
 * @sp:     starting stack pointer, or NULL for the caller's own stack.
 * @loglvl: KERN_* prefix for continuation lines and the trace.
 */
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	unsigned long *stack;
	int i;
	/*
	 * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
	 * back trace for this cpu.
	 */
	if (sp == NULL)
		sp = (unsigned long *)&sp; /* address of a local ~= current SP */
	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		/* Stop at the THREAD_SIZE-aligned top of the kernel stack. */
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		/* New line every four words, prefixed with the log level. */
		if (i && ((i % 4) == 0))
			printk("%s ", loglvl);
		printk("%016lx ", *stack++);
	}
	printk("\n");
	dik_show_trace(sp, loglvl);
}
void
die_if_kernel(char *str, struct pt_regs *regs, long err, unsigned long *r9_15)
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
if (regs->ps & 8)
return;
......@@ -126,9 +83,9 @@ die_if_kernel(char *str, struct pt_regs *regs, long err, unsigned long *r9_15)
printk("CPU %d ", hard_smp_processor_id());
#endif
printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
dik_show_regs(regs, r9_15);
dik_show_regs(regs);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
show_stack(current, NULL, KERN_EMERG);
dik_show_code((unsigned int *)regs->pc);
if (test_and_set_thread_flag(TIF_DIE_IF_KERNEL)) {
......@@ -178,7 +135,7 @@ do_entArith(unsigned long summary, unsigned long write_mask,
if (si_code == 0)
return;
}
die_if_kernel("Arithmetic fault", regs, 0, NULL);
die_if_kernel("Arithmetic fault", regs, 0);
force_sig_fault(SIGFPE, si_code, (void __user *)regs->pc, 0);
}
......@@ -205,7 +162,7 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs)
return;
}
die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
regs, type, NULL);
regs, type);
}
switch (type) {
......@@ -297,15 +254,14 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs)
return;
}
if ((regs->ps & ~IPL_MAX) == 0)
die_if_kernel("Instruction fault", regs, type, NULL);
die_if_kernel("Instruction fault", regs, type);
break;
case 3: /* FEN fault */
/*
* Irritating users can call HMC_clrfen to disable the
* FPU for the process. The kernel will then trap in
* do_switch_stack and undo_switch_stack when we try
* to save and restore the FP registers.
* FPU for the process. The kernel will then trap to
* save and restore the FP registers.
* Given that GCC by default generates code that uses the
* FP registers, HMC_clrfen is not useful except for DoS
......@@ -324,47 +280,15 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs)
force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0);
}
/*
* entUna has a different register layout to be reasonably simple. It
* needs access to all the integer registers (the kernel doesn't use
* fp-regs), and it needs to have them in order for simpler access.
*
* Due to the non-standard register layout (and because we don't want
* to handle floating-point regs), user-mode unaligned accesses are
* handled separately by do_entUnaUser below.
*
* Oh, btw, we don't handle the "gp" register correctly, but if we fault
* on a gp-register unaligned load/store, something is _very_ wrong
* in the kernel anyway..
*/
struct allregs {
unsigned long regs[32];
unsigned long ps, pc, gp, a0, a1, a2;
};
struct unaligned_stat {
unsigned long count, va, pc;
} unaligned[2];
/* Macro for exception fixup code to access integer registers. */
#define una_reg(r) (_regs[(r) >= 16 && (r) <= 18 ? (r) + 19 : (r)])
asmlinkage void
do_entUna(void *va, unsigned long opcode, unsigned long reg,
struct allregs *regs)
struct pt_regs *regs)
{
long error;
unsigned long tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
unsigned long pc = regs->pc - 4;
unsigned long *_regs = regs->regs;
const struct exception_table_entry *fixup;
unaligned[0].count++;
unaligned[0].va = (unsigned long) va;
unaligned[0].pc = pc;
/*
* We don't want to use the generic get/put unaligned macros as
* we want to trap exceptions. Only if we actually get an
......@@ -390,7 +314,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg,
if (error)
goto got_exception;
una_reg(reg) = tmp1 | tmp2;
map_regs(reg) = tmp1 | tmp2;
return;
case 0x22:
......@@ -411,7 +335,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg,
if (error)
goto got_exception;
una_reg(reg) = (int)(tmp1 | tmp2);
map_regs(reg) = (int)(tmp1 | tmp2);
return;
case 0x23: /* ldl */
......@@ -432,7 +356,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg,
if (error)
goto got_exception;
una_reg(reg) = tmp1 | tmp2;
map_regs(reg) = tmp1 | tmp2;
return;
case 0x29: /* sth */
......@@ -450,7 +374,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg,
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(una_reg(reg)), "0"(0));
: "r"(va), "r"(map_regs(reg)), "0"(0));
if (error)
goto got_exception;
......@@ -482,7 +406,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg,
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(una_reg(reg)), "0"(0));
: "r"(va), "r"(map_regs(reg)), "0"(0));
if (error)
goto got_exception;
......@@ -534,7 +458,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg,
".previous"
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3),
"=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8)
: "r"(va), "r"(una_reg(reg)), "0"(0));
: "r"(va), "r"(map_regs(reg)), "0"(0));
if (error)
goto got_exception;
......@@ -553,7 +477,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg,
if (fixup != 0) {
unsigned long newpc;
newpc = fixup_exception(una_reg, fixup, pc);
newpc = fixup_exception(map_regs, fixup, pc);
printk("Forwarding unaligned exception at %lx (%lx)\n",
pc, newpc);
......@@ -569,31 +493,9 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg,
printk("%s(%d): unhandled unaligned exception\n",
current->comm, task_pid_nr(current));
printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
pc, una_reg(26), regs->ps);
printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
una_reg(0), una_reg(1), una_reg(2));
printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
una_reg(3), una_reg(4), una_reg(5));
printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
una_reg(6), una_reg(7), una_reg(8));
printk("r9 = %016lx r10= %016lx r11= %016lx\n",
una_reg(9), una_reg(10), una_reg(11));
printk("r12= %016lx r13= %016lx r14= %016lx\n",
una_reg(12), una_reg(13), una_reg(14));
printk("r15= %016lx\n", una_reg(15));
printk("r16= %016lx r17= %016lx r18= %016lx\n",
una_reg(16), una_reg(17), una_reg(18));
printk("r19= %016lx r20= %016lx r21= %016lx\n",
una_reg(19), una_reg(20), una_reg(21));
printk("r22= %016lx r23= %016lx r24= %016lx\n",
una_reg(22), una_reg(23), una_reg(24));
printk("r25= %016lx r27= %016lx r28= %016lx\n",
una_reg(25), una_reg(27), una_reg(28));
printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
dik_show_regs(regs);
dik_show_code((unsigned int *)pc);
dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
show_stack(current, NULL, KERN_EMERG);
if (test_and_set_thread_flag(TIF_DIE_IF_KERNEL)) {
printk("die_if_kernel recursion detected.\n");
......@@ -671,20 +573,6 @@ s_reg_to_mem(unsigned long s_reg)
1L << 0x2c | 1L << 0x2d | /* stw stl */ \
1L << 0x0d | 1L << 0x0e) /* sth stb */
#define R(x) ((size_t) &((struct pt_regs *)0)->x)
static int unauser_reg_offsets[32] = {
R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
/* r9 ... r15 are stored in front of regs. */
-56, -48, -40, -32, -24, -16, -8,
R(r16), R(r17), R(r18),
R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
R(r27), R(r28), R(gp),
0, 0
};
#undef R
asmlinkage void
do_entUnaUser(void __user *va, unsigned long opcode,
unsigned long reg, struct pt_regs *regs)
......@@ -729,15 +617,11 @@ do_entUnaUser(void __user *va, unsigned long opcode,
if ((unsigned long)va >= TASK_SIZE)
goto give_sigsegv;
++unaligned[1].count;
unaligned[1].va = (unsigned long)va;
unaligned[1].pc = regs->pc - 4;
if ((1L << opcode) & OP_INT_MASK) {
/* it's an integer load/store */
if (reg < 30) {
reg_addr = (unsigned long *)
((char *)regs + unauser_reg_offsets[reg]);
((char *)regs + regoffsets[reg]);
} else if (reg == 30) {
/* usp in HMCODE regs */
fake_reg = rdusp();
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Mao Minkai
* Author: Mao Minkai
*
* This code is taken from arch/mips/kernel/segment.c
* Copyright (C) 2013 Imagination Technologies Ltd.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <asm/unaligned.h>
#include <asm/debug.h>
/*
 * seq_file show handler for the "unaligned" debugfs entry: prints the
 * kernel-mode and user-mode unaligned-access counters together with
 * the PC and virtual address of the most recent fault.
 *
 * NOTE(review): the unaligned[] statistics array is defined in the
 * trap-handling code; this change set appears to remove both the array
 * and the code that updated it from traps.c — confirm this file still
 * links and that the counters are maintained somewhere.
 */
static int show_unaligned(struct seq_file *sf, void *v)
{
	/* Declared extern here to avoid a header dependency on traps.c. */
	extern struct unaligned_stat {
	unsigned long count, va, pc;
	} unaligned[2];
	seq_printf(sf, "kernel unaligned acc\t: %ld (pc=%lx, va=%lx)\n", unaligned[0].count, unaligned[0].pc, unaligned[0].va);
	seq_printf(sf, "user unaligned acc\t: %ld (pc=%lx, va=%lx)\n", unaligned[1].count, unaligned[1].pc, unaligned[1].va);
	return 0;
}
/* debugfs open handler: bind the seq_file single-show callback. */
static int unaligned_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_unaligned, NULL);
}
/* File operations for the read-only "unaligned" debugfs entry. */
static const struct file_operations unaligned_fops = {
	.open		= unaligned_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * Create the "unaligned" file under the sw64 debugfs directory.
 *
 * Returns 0 on success, or -ENODEV when the architecture debugfs root
 * was never created.
 */
static int __init unaligned_info(void)
{
	if (!sw64_debugfs_dir)
		return -ENODEV;

	/*
	 * debugfs_create_file() returns an ERR_PTR() on failure, never
	 * NULL, so the previous "if (!dentry) return -ENOMEM" check could
	 * not trigger.  Kernel convention is to ignore the return value of
	 * debugfs creation helpers entirely: debugfs failures must not be
	 * treated as fatal.
	 */
	debugfs_create_file("unaligned", S_IRUGO,
			    sw64_debugfs_dir, NULL,
			    &unaligned_fops);
	return 0;
}
device_initcall(unaligned_info);
......@@ -42,6 +42,13 @@ config KVM_SW64_HOST
Provides host support for SW64 processors.
To compile this as a module, choose M here.
config KVM_MEMHOTPLUG
bool "Memory hotplug support for guest"
depends on KVM
help
Provides memory hotplug support for SW64 guest.
source "drivers/vhost/Kconfig"
endif # VIRTUALIZATION
......@@ -15,18 +15,16 @@ ENTRY(__sw64_vcpu_run)
/* save host fpregs */
ldl $1, TI_TASK($8)
ldi $1, TASK_THREAD($1)
rfpcr $f0
fstd $f0, THREAD_FPCR($1)
ldi $1, THREAD_CTX_FP($1)
vstd $f2, CTX_FP_F2($1)
vstd $f3, CTX_FP_F3($1)
vstd $f4, CTX_FP_F4($1)
vstd $f5, CTX_FP_F5($1)
vstd $f6, CTX_FP_F6($1)
vstd $f7, CTX_FP_F7($1)
vstd $f8, CTX_FP_F8($1)
vstd $f9, CTX_FP_F9($1)
fstd $f0, TASK_THREAD_FPCR($1)
vstd $f2, TASK_THREAD_F2($1)
vstd $f3, TASK_THREAD_F3($1)
vstd $f4, TASK_THREAD_F4($1)
vstd $f5, TASK_THREAD_F5($1)
vstd $f6, TASK_THREAD_F6($1)
vstd $f7, TASK_THREAD_F7($1)
vstd $f8, TASK_THREAD_F8($1)
vstd $f9, TASK_THREAD_F9($1)
ldi sp, -VCPU_RET_SIZE(sp)
/* r16 = guest kvm_vcpu_arch.vcb struct pointer */
......@@ -34,20 +32,15 @@ ENTRY(__sw64_vcpu_run)
/* r18 = hcall args */
/* save host pt_regs to current kernel stack */
ldi sp, -PT_REGS_SIZE(sp)
stl $8, PT_REGS_R8(sp)
stl $9, PT_REGS_R9(sp)
stl $10, PT_REGS_R10(sp)
stl $11, PT_REGS_R11(sp)
stl $12, PT_REGS_R12(sp)
stl $13, PT_REGS_R13(sp)
stl $14, PT_REGS_R14(sp)
stl $15, PT_REGS_R15(sp)
stl $26, PT_REGS_R26(sp)
/* save host switch stack to current kernel stack */
ldi sp, -SWITCH_STACK_SIZE(sp)
stl $9, SWITCH_STACK_R9(sp)
stl $10, SWITCH_STACK_R10(sp)
stl $11, SWITCH_STACK_R11(sp)
stl $12, SWITCH_STACK_R12(sp)
stl $13, SWITCH_STACK_R13(sp)
stl $14, SWITCH_STACK_R14(sp)
stl $15, SWITCH_STACK_R15(sp)
/* restore guest switch stack from guest kvm_regs struct */
ldl $0, KVM_REGS_R0($17)
ldl $1, KVM_REGS_R1($17)
......@@ -203,27 +196,22 @@ $g_setfpec_over:
stl $27, KVM_REGS_R27($17)
stl $28, KVM_REGS_R28($17)
/* restore host switch stack from host sp */
ldl $9, SWITCH_STACK_R9(sp)
ldl $10, SWITCH_STACK_R10(sp)
ldl $11, SWITCH_STACK_R11(sp)
ldl $12, SWITCH_STACK_R12(sp)
ldl $13, SWITCH_STACK_R13(sp)
ldl $14, SWITCH_STACK_R14(sp)
ldl $15, SWITCH_STACK_R15(sp)
ldi sp, SWITCH_STACK_SIZE(sp)
/* restore host regs from host sp */
ldl $8, PT_REGS_R8(sp)
ldl $9, PT_REGS_R9(sp)
ldl $10, PT_REGS_R10(sp)
ldl $11, PT_REGS_R11(sp)
ldl $12, PT_REGS_R12(sp)
ldl $13, PT_REGS_R13(sp)
ldl $14, PT_REGS_R14(sp)
ldl $15, PT_REGS_R15(sp)
ldl $26, PT_REGS_R26(sp)
ldi sp, PT_REGS_SIZE(sp)
ldi $8, 0x3fff
bic sp, $8, $8
/* restore host fpregs */
ldl $1, TI_TASK($8)
ldi $1, TASK_THREAD($1)
fldd $f0, THREAD_FPCR($1)
fldd $f0, TASK_THREAD_FPCR($1)
wfpcr $f0
fimovd $f0, $2
and $2, 0x3, $2
......@@ -243,15 +231,14 @@ $setfpec_1:
$setfpec_2:
setfpec2
$setfpec_over:
ldi $1, THREAD_CTX_FP($1)
vldd $f2, CTX_FP_F2($1)
vldd $f3, CTX_FP_F3($1)
vldd $f4, CTX_FP_F4($1)
vldd $f5, CTX_FP_F5($1)
vldd $f6, CTX_FP_F6($1)
vldd $f7, CTX_FP_F7($1)
vldd $f8, CTX_FP_F8($1)
vldd $f9, CTX_FP_F9($1)
vldd $f2, TASK_THREAD_F2($1)
vldd $f3, TASK_THREAD_F3($1)
vldd $f4, TASK_THREAD_F4($1)
vldd $f5, TASK_THREAD_F5($1)
vldd $f6, TASK_THREAD_F6($1)
vldd $f7, TASK_THREAD_F7($1)
vldd $f8, TASK_THREAD_F8($1)
vldd $f9, TASK_THREAD_F9($1)
/* if $0 > 0, handle hcall */
bgt $0, $ret_to
......@@ -261,25 +248,17 @@ $setfpec_over:
/* Hmcode will setup in */
/* restore $16 $17 $18, do interrupt trick */
ldi sp, -(HOST_INT_SIZE + PT_REGS_SIZE + SWITCH_STACK_SIZE)(sp)
ldi sp, -(HOST_INT_SIZE + PT_REGS_SIZE)(sp)
ldl $16, HOST_INT_R16(sp)
ldl $17, HOST_INT_R17(sp)
ldl $18, HOST_INT_R18(sp)
ldi sp, (HOST_INT_SIZE + PT_REGS_SIZE + SWITCH_STACK_SIZE)(sp)
ldi sp, (HOST_INT_SIZE + PT_REGS_SIZE)(sp)
ldi $8, 0x3fff
bic sp, $8, $8
ldi $19, -PT_REGS_SIZE(sp)
ldi $26, ret_from_do_entInt_noregs
call $31, do_entInt
/* ret($0) indicate hcall number */
ret_from_do_entInt_noregs:
call $26, do_entInt
ldl $26, VCPU_RET_RA(sp)
ldl $0, VCPU_RET_R0(sp)
/* restore r16 - r19 */
$ret_to:
/* ret($0) indicate hcall number */
ldi sp, VCPU_RET_SIZE(sp) /* pop stack */
ret
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册