Commit 17ed838a authored by Yanyan Jiang

cleanups

Parent 9380b3c0
@@ -75,17 +75,16 @@ _Device *_device(int n);
// ================== Asynchronous Extension (ASYE) ==================
int _asye_init(_Context *(*handler)(_Event ev, _Context *ctx));
+_Context *_kcontext(_Area kstack, void (*entry)(void *), void *arg);
void _yield();
int _intr_read();
void _intr_write(int enable);
-_Context *_kcontext(_Area kstack, void (*entry)(void *), void *arg);
// =================== Protection Extension (PTE) ====================
int _pte_init(void *(*pgalloc)(size_t size), void (*pgfree)(void *));
int _protect(_Protect *p);
void _unprotect(_Protect *p);
void _prot_switch(_Protect *p);
int _map(_Protect *p, void *va, void *pa, int prot);
_Context *_ucontext(_Protect *p, _Area ustack, _Area kstack,
void *entry, void *args);
......
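Taken together, the ASYE declarations above are enough to run a small event-driven kernel. A minimal sketch of how they compose (assuming _Area carries start/end pointers, as kcontext() below relies on; on_event, worker and kernel_main are hypothetical names):

static _Context *on_event(_Event ev, _Context *ctx) {
  return ctx;                 // resume the interrupted context unchanged
}

static void worker(void *arg) {
  for (;;) _yield();          // each yield traps into on_event via int $0x80
}

static uint8_t stk[4096];

void kernel_main() {
  _asye_init(on_event);
  _Area ks = { .start = stk, .end = stk + sizeof(stk) };
  _Context *c = _kcontext(ks, worker, (void *)0);
  (void)c;  // a real kernel would return c from on_event to switch to it
}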
@@ -28,7 +28,7 @@ void percpu_initirq();
void percpu_initgdt();
void percpu_initlapic();
void percpu_initpg();
-void thiscpu_setustk(uintptr_t ss0, uintptr_t esp0);
+void thiscpu_setstk0(uintptr_t ss0, uintptr_t esp0);
void thiscpu_halt() __attribute__((__noreturn__));
void othercpu_halt();
@@ -45,7 +45,7 @@ void othercpu_halt();
cli(); \
while (1) { \
if (0 == _atomic_xchg(&name##_locked, 1)) break; \
-      __asm__ volatile ("pause"); \
+      pause(); \
} \
} \
void name##_unlock() { \
......
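The hunk above only swaps the raw "pause" asm for the new pause() wrapper. For context, the test-and-set idea behind the name##_lock/name##_unlock macro pair looks roughly like this when expanded for a single lock (a sketch; whether unlock also restores the interrupt flag is not visible in this hunk):

static volatile intptr_t big_locked = 0;

static void big_lock() {
  cli();                                      // no interrupts while spinning
  while (_atomic_xchg(&big_locked, 1) != 0) {
    pause();                                  // spin-wait hint to the CPU
  }                                           // xchg returned 0: lock acquired
}

static void big_unlock() {
  _atomic_xchg(&big_locked, 0);               // atomic release
  sti();
}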
#ifndef __X86_H__
#define __X86_H__
// CPU rings
#define DPL_KERN 0x0 // Kernel (ring 0)
#define DPL_USER 0x3 // User (ring 3)
@@ -94,7 +95,7 @@
#define EX_PF 14
#define EX_MF 15
-// Below are only valid for c/cpp files
+// Below are only defined for c/cpp files
#ifndef __ASSEMBLER__
#include <arch.h>
@@ -106,18 +107,12 @@
// \--- PDX(va) --/ \--- PTX(va) --/\------ OFF(va) ------/
typedef uint32_t PTE;
typedef uint32_t PDE;
-#define PDX(va) (((uint32_t)(va) >> PDXSHFT) & 0x3ff)
-#define PTX(va) (((uint32_t)(va) >> PTXSHFT) & 0x3ff)
-#define OFF(va) ((uint32_t)(va) & 0xfff)
-// construct virtual address from indexes and offset
-#define PGADDR(d, t, o) ((uint32_t)((d)<<PDXSHFT | (t)<<PTXSHFT | (o)))
+#define PDX(va) (((uint32_t)(va) >> PDXSHFT) & 0x3ff)
+#define PTX(va) (((uint32_t)(va) >> PTXSHFT) & 0x3ff)
+#define OFF(va) ((uint32_t)(va) & 0xfff)
#define ROUNDUP(a, sz) ((((uintptr_t)a)+(sz)-1) & ~((sz)-1))
#define ROUNDDOWN(a, sz) ((((uintptr_t)a)) & ~((sz)-1))
-// Address in page table or page directory entry
-#define PTE_ADDR(pte) ((uint32_t)(pte) & ~0xfff)
+#define PTE_ADDR(pte) ((uint32_t)(pte) & ~0xfff)
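These macros split a 32-bit virtual address into a 10-bit page-directory index, a 10-bit page-table index, and a 12-bit page offset. A worked example, plus a hypothetical two-level lookup (the present bit is assumed to be bit 0 of an entry, as on x86):

// va = 0x00802abc:
//   PDX(va) = (va >> 22) & 0x3ff = 2        page-directory index
//   PTX(va) = (va >> 12) & 0x3ff = 2        page-table index
//   OFF(va) = va & 0xfff        = 0xabc     offset inside the 4 KB page
static PTE *walk(PDE *pgdir, void *va) {
  PDE pde = pgdir[PDX(va)];
  if (!(pde & 0x1)) return 0;                // present bit clear: unmapped
  PTE *pt = (PTE *)PTE_ADDR(pde);            // frame holding the page table
  return &pt[PTX(va)];                       // the entry that maps va
}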
// Segment Descriptor
typedef struct SegDesc {
@@ -209,63 +204,68 @@ typedef struct MPDesc {
uint8_t reserved[3];
} MPDesc;
// Instruction wrappers
+#define asm __asm__
static inline uint8_t inb(int port) {
uint8_t data;
-  __asm__ volatile ("inb %1, %0" : "=a"(data) : "d"((uint16_t)port));
+  asm volatile ("inb %1, %0" : "=a"(data) : "d"((uint16_t)port));
return data;
}
static inline uint32_t inw(int port) {
uint16_t data;
-  __asm__ volatile ("inw %1, %0" : "=a"(data) : "d"((uint16_t)port));
+  asm volatile ("inw %1, %0" : "=a"(data) : "d"((uint16_t)port));
return data;
}
static inline uint32_t inl(int port) {
uint32_t data;
-  __asm__ volatile ("inl %1, %0" : "=a"(data) : "d"((uint16_t)port));
+  asm volatile ("inl %1, %0" : "=a"(data) : "d"((uint16_t)port));
return data;
}
static inline void outb(int port, uint8_t data) {
-  __asm__ volatile ("outb %%al, %%dx" : : "a"(data), "d"((uint16_t)port));
+  asm volatile ("outb %%al, %%dx" : : "a"(data), "d"((uint16_t)port));
}
static inline void outw(int port, uint16_t data) {
-  __asm__ volatile ("outw %%ax, %%dx" : : "a"(data), "d"((uint16_t)port));
+  asm volatile ("outw %%ax, %%dx" : : "a"(data), "d"((uint16_t)port));
}
static inline void outl(int port, uint32_t data) {
-  __asm__ volatile ("outl %%eax, %%dx" : : "a"(data), "d"((uint16_t)port));
+  asm volatile ("outl %%eax, %%dx" : : "a"(data), "d"((uint16_t)port));
}
static inline void cli() {
-  __asm__ volatile ("cli");
+  asm volatile ("cli");
}
static inline void sti() {
-  __asm__ volatile ("sti");
+  asm volatile ("sti");
}
static inline void hlt() {
-  __asm__ volatile ("hlt");
+  asm volatile ("hlt");
}
+static inline void pause() {
+  asm volatile ("pause");
+}
static inline uint32_t get_efl() {
volatile uint32_t efl;
-  __asm__ volatile ("pushf; pop %0": "=r"(efl));
+  asm volatile ("pushf; pop %0": "=r"(efl));
return efl;
}
static inline uint32_t get_cr0(void) {
volatile uint32_t val;
-  __asm__ volatile ("movl %%cr0, %0" : "=r"(val));
+  asm volatile ("movl %%cr0, %0" : "=r"(val));
return val;
}
static inline void set_cr0(uint32_t cr0) {
-  __asm__ volatile ("movl %0, %%cr0" : : "r"(cr0));
+  asm volatile ("movl %0, %%cr0" : : "r"(cr0));
}
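A note on the new #define asm __asm__: in GCC's strict ISO modes (e.g. -std=c11) the asm keyword is a disabled GNU extension and only __asm__ is accepted, so the macro lets all the wrappers here use the shorter spelling under any dialect:

static inline void nop_demo() {
  // under gcc -std=c11 the bare keyword is rejected ('asm' is a GNU extension),
  __asm__ volatile ("nop");  // but __asm__ always compiles,
  asm volatile ("nop");      // and the macro makes this spelling portable too
}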
@@ -274,7 +274,7 @@ static inline void set_idt(GateDesc *idt, int size) {
data[0] = size - 1;
data[1] = (uint32_t)idt;
data[2] = (uint32_t)idt >> 16;
-  __asm__ volatile ("lidt (%0)" : : "r"(data));
+  asm volatile ("lidt (%0)" : : "r"(data));
}
static inline void set_gdt(SegDesc *gdt, int size) {
@@ -282,21 +282,21 @@ static inline void set_gdt(SegDesc *gdt, int size) {
data[0] = size - 1;
data[1] = (uint32_t)gdt;
data[2] = (uint32_t)gdt >> 16;
-  __asm__ volatile ("lgdt (%0)" : : "r"(data));
+  asm volatile ("lgdt (%0)" : : "r"(data));
}
static inline void set_tr(int selector) {
-  __asm__ volatile ("ltr %0" : : "r"((uint16_t)selector));
+  asm volatile ("ltr %0" : : "r"((uint16_t)selector));
}
static inline uint32_t get_cr2() {
volatile uint32_t val;
-  __asm__ volatile ("movl %%cr2, %0" : "=r"(val));
+  asm volatile ("movl %%cr2, %0" : "=r"(val));
return val;
}
static inline void set_cr3(void *pdir) {
-  __asm__ volatile ("movl %0, %%cr3" : : "r"(pdir));
+  asm volatile ("movl %0, %%cr3" : : "r"(pdir));
}
#endif
......
#include <am-x86.h>
#include <stdarg.h>
-#define IDT_ENTRY_DECL(id, dpl, err) \
-  void irq##id();
static _Context* (*user_handler)(_Event, _Context*) = NULL;
static GateDesc idt[NR_IRQ];
-IRQS(IDT_ENTRY_DECL)
+#define IRQHANDLE_DECL(id, dpl, err) void irq##id();
+IRQS(IRQHANDLE_DECL)
void irqall();
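IRQS(f) is an X-macro: one table lists every interrupt vector as f(id, dpl, err), and each client passes its own f to stamp out code per vector; here IRQHANDLE_DECL turns each row into a handler prototype. With a hypothetical three-row table the expansion would be:

// #define IRQS(f) f(0, KERN, NOERR) f(14, KERN, ERR) f(128, USER, NOERR)
// IRQS(IRQHANDLE_DECL) then expands, row by row, to:
//   void irq0(); void irq14(); void irq128();
// Passing a different f over the same table can fill the IDT instead.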
int asye_init(_Context *(*handler)(_Event, _Context *)) {
@@ -21,33 +20,12 @@ int asye_init(_Context *(*handler)(_Event, _Context *)) {
user_handler = handler;
percpu_initirq();
return 0;
}
-static void panic_on_return() {
-  panic("kernel context returns");
-}
-_Context *kcontext(_Area stack, void (*entry)(void *), void *arg) {
-  _Context *ctx = (_Context *)stack.start;
-  *ctx = (_Context) {
-    .eax = 0, .ebx = 0, .ecx = 0, .edx = 0,
-    .esi = 0, .edi = 0, .ebp = 0, .esp3 = 0,
-    .ss0 = 0, .esp0 = (uint32_t)stack.end,
-    .cs = KSEL(SEG_KCODE), .eip = (uint32_t)entry, .eflags = FL_IF,
-    .ds = KSEL(SEG_KDATA), .es = KSEL(SEG_KDATA), .ss = KSEL(SEG_KDATA),
-  };
-  void **esp = (void **)(((uint32_t)ctx->esp0) - 2 * sizeof(uint32_t));
-  esp[0] = panic_on_return;
-  esp[1] = arg;
-  ctx->esp0 = (uint32_t)esp;
-  return ctx;
-}
void yield() {
if (!user_handler) panic("no interrupt handler");
-  __asm__ volatile ("int $0x80" : : "a"(-1));
+  asm volatile ("int $0x80" : : "a"(-1));
}
int intr_read() {
@@ -64,6 +42,26 @@ void intr_write(int enable) {
}
}
+static void panic_on_return() { panic("kernel context returns"); }
+_Context *kcontext(_Area stack, void (*entry)(void *), void *arg) {
+  _Context *ctx = (_Context *)stack.start;
+  *ctx = (_Context) {
+    .eax = 0, .ebx = 0, .ecx = 0, .edx = 0,
+    .esi = 0, .edi = 0, .ebp = 0, .esp3 = 0,
+    .ss0 = 0, .esp0 = (uint32_t)stack.end,
+    .cs = KSEL(SEG_KCODE), .eip = (uint32_t)entry, .eflags = FL_IF,
+    .ds = KSEL(SEG_KDATA), .es = KSEL(SEG_KDATA), .ss = KSEL(SEG_KDATA),
+  };
+  void *values[] = { panic_on_return, arg }; // copy to stack
+  ctx->esp0 -= sizeof(values);
+  for (int i = 0; i < sizeof(values) / sizeof(void *); i++) {
+    ((uintptr_t *)ctx->esp0)[i] = (uintptr_t)values[i];
+  }
+  return ctx;
+}
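The two words copied just below stack.end exploit the cdecl calling convention: when the kernel first switches to this context, eip = entry and esp = esp0, so entry() finds arg as its first argument, and if it ever returns, ret lands in panic_on_return(). The resulting layout (a sketch, high addresses on top):

//  stack.end   -> +--------------------+
//                 |        arg         |   entry()'s first cdecl argument
//                 |  &panic_on_return  |   fake return address for entry()
//  ctx->esp0  --> +--------------------+   initial %esp of the new context
//                 |        ...         |   free stack, grows downward
//  stack.start -> |   _Context image   |   the register file built above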
#define IRQ T_IRQ0 +
#define MSG(m) : ev.msg = m;
@@ -164,15 +162,15 @@ void irq_handle(TrapFrame *tf) {
#define def(r) , [r] "m"(ret_ctx->r) // -> [eax] "m"(ret_ctx->eax)
if (ret_ctx->cs & DPL_USER) { // return to user
-    thiscpu_setustk(ret_ctx->ss0, ret_ctx->esp0);
-    __asm__ volatile goto (
+    thiscpu_setstk0(ret_ctx->ss0, ret_ctx->esp0);
+    asm volatile goto (
"movl %[esp], %%esp;" // move stack
REGS_USER(push) // push reg context onto stack
"jmp %l[iret]" // goto iret
: : [esp] "m"(ret_ctx->esp0)
REGS_USER(def) : : iret );
} else { // return to kernel
-    __asm__ volatile goto (
+    asm volatile goto (
"movl %[esp], %%esp;" // move stack
REGS_KERNEL(push) // push reg context onto stack
"jmp %l[iret]" // goto iret
@@ -180,7 +178,7 @@ void irq_handle(TrapFrame *tf) {
REGS_KERNEL(def) : : iret );
}
iret:
-  __asm__ volatile (
+  asm volatile (
"popal;" // restore context
"popl %es;"
"popl %ds;"
......
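The asm goto above hand-rolls the context switch: point %esp at the saved image, push it in trap-frame order with the REGS_* macros, then jump to the shared iret: epilogue. The ret_ctx->cs & DPL_USER test works because the low two bits of a code-segment selector encode its privilege level: 3 for user selectors, 0 for kernel ones. Roughly what the epilogue consumes (its tail after popl %ds is elided in this hunk, so the last two lines are an assumption based on the usual x86 trap-return sequence):

// popal          ; edi, esi, ebp, (esp value ignored), ebx, edx, ecx, eax
// popl %es
// popl %ds
// addl $8, %esp  ; assumed: discard the pushed irq number and error code
// iret           ; pops eip, cs, eflags (+ esp3, ss when returning to ring 3)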
@@ -6,7 +6,7 @@ uint64_t uptsc;
static inline uint64_t rdtsc() {
uint32_t lo, hi;
-  __asm__ volatile ("rdtsc": "=a"(lo), "=d"(hi));
+  asm volatile ("rdtsc": "=a"(lo), "=d"(hi));
return ((uint64_t)hi << 32) | lo;
}
......
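rdtsc returns the 64-bit time-stamp counter split across edx:eax, which the shift-and-or reassembles. A hypothetical helper for coarse cycle counts (no serializing fences, so only good for rough measurement; assumes a constant-rate TSC, which QEMU and modern x86 parts provide):

static inline uint64_t cycles_of(void (*f)(void)) {
  uint64_t t0 = rdtsc();     // counter before
  f();
  return rdtsc() - t0;       // elapsed TSC ticks
}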
@@ -7,42 +7,43 @@ volatile struct boot_info *boot_rec = (void *)0x7000;
static void percpu_entry();
static void ap_entry();
-static void stack_switch(void (*entry)());
+static void jump_to(void (*entry)());
int _mpe_init(void (*entry)()) {
user_entry = entry;
-  stack_switch(percpu_entry); // switch stack, and the bootstrap stack at
-                              // 0x7000 can be reused by ap's bootloader
+  jump_to(percpu_entry);
panic("mp_init should not return");
return 1;
}
-int _cpu(void) {
-  return lapic[8] >> 24;
-}
int _ncpu() {
  return ncpu;
}
+int _cpu(void) {
+  return lapic[8] >> 24;
+}
intptr_t _atomic_xchg(volatile intptr_t *addr, intptr_t newval) {
intptr_t result;
-  __asm__ volatile ("lock xchgl %0, %1":
-    "+m"(*addr), "=a"(result): "1"(newval): "cc");
+  asm volatile ("lock xchgl %0, %1":
+    "+m"(*addr), "=a"(result) : "1"(newval) : "cc");
return result;
}
static void percpu_entry() { // all cpus execute percpu_entry()
if (_cpu() != 0) {
// init an ap
-    stack_switch(ap_entry);
+    jump_to(ap_entry);
} else {
    // stack already switched, boot all aps
-    for (int cpu = 1; cpu < ncpu; cpu ++) {
+    for (int cpu = 1; cpu < ncpu; cpu++) {
boot_rec->is_ap = 1;
boot_rec->entry = percpu_entry;
lapic_bootap(cpu, 0x7c00);
-      while (_atomic_xchg(&apboot_done, 0) != 1);
+      while (_atomic_xchg(&apboot_done, 0) != 1) {
+        pause();
+      }
}
user_entry();
}
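Booting the APs one at a time lets every AP reuse the single boot record at 0x7000: the BSP fills it in, kicks one AP toward the real-mode entry at 0x7c00, then spins (now with pause()) until that AP's ap_entry raises apboot_done, and only then moves on to the next. The two halves of the one-word handshake, side by side:

// BSP, per AP:                           // AP, at the end of ap_entry():
//   boot_rec->is_ap = 1;
//   boot_rec->entry = percpu_entry;      //   ...per-cpu init...
//   lapic_bootap(cpu, 0x7c00);
//   while (_atomic_xchg(&apboot_done, 0) != 1)
//     pause();                           //   _atomic_xchg(&apboot_done, 1);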
@@ -53,14 +54,14 @@ static void ap_entry() {
percpu_initirq();
percpu_initlapic();
percpu_initpg();
ioapic_enable(IRQ_KBD, _cpu());
_atomic_xchg(&apboot_done, 1);
user_entry();
}
-static void stack_switch(void (*entry)()) {
-  static uint8_t cpu_stk[MAX_CPU][4096]; // each cpu gets a 4KB stack
-  __asm__ volatile (
-    "movl %0, %%esp;"
-    "call *%1" : : "r"(&cpu_stk[_cpu() + 1][0]), "r"(entry));
+static void jump_to(void (*entry)()) {
+  static uint8_t cpu_stk[MAX_CPU][4096];
+  asm volatile (
+    "movl %0, %%esp;" // switch stack, and the bootstrap stack at
+    "call *%1"        // 0x7000 can be reused by ap's bootloader
+    : : "r"(&cpu_stk[_cpu() + 1][0]), "r"(entry));
}
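&cpu_stk[_cpu() + 1][0] is one byte past the end of this CPU's 4 KB slot, which is exactly the initial stack top because x86 stacks grow toward lower addresses; after the movl, call *%1 never touches the old stack again, so the bootstrap stack is free for the next AP. The layout:

// cpu_stk: [ cpu 0: 4096 B ][ cpu 1: 4096 B ][ cpu 2: 4096 B ] ...
//                           ^
//                           &cpu_stk[0 + 1][0] = initial %esp for cpu 0
// In general, cpu i starts with %esp = (uintptr_t)&cpu_stk[i + 1][0].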
@@ -66,7 +66,7 @@ void unprotect(_Protect *p) {
va += (1 << PDXSHFT)) {
PDE pde = upt[PDX(va)];
if (pde & PTE_P) {
-      pgfree((void*)PTE_ADDR(pde));
+      pgfree((void *)PTE_ADDR(pde));
}
}
pgfree(upt);
@@ -77,13 +77,13 @@ void prot_switch(_Protect *p) {
}
int map(_Protect *p, void *va, void *pa, int prot) {
-  // panic because the below cases are likely bugs
if ((prot & _PROT_NONE) && (prot != _PROT_NONE))
panic("invalid permission");
panic("invalid protection flags");
if ((uintptr_t)va != ROUNDDOWN(va, PGSIZE) ||
(uintptr_t)pa != ROUNDDOWN(pa, PGSIZE)) {
panic("unaligned memory address");
}
+  // panic because the above cases are likely bugs
if (!in_range(va, prot_vm_range)) {
return 1; // mapping an out-of-range address
}
......
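With the comment moved below the checks, the policy reads correctly: misaligned addresses or contradictory protection flags panic because they are caller bugs, while a va outside prot_vm_range is merely reported by the return value. Example calls and outcomes under these rules (_PROT_READ is assumed to exist alongside _PROT_NONE among AM's protection flags):

// map(p, (void *)0x8048000, (void *)0x100000, _PROT_READ)
//     -> proceeds: both addresses 4 KB aligned, flags consistent
// map(p, (void *)0x8048010, pa, prot)      -> panic: va not page-aligned
// map(p, va, pa, _PROT_NONE | _PROT_READ)  -> panic: _PROT_NONE mixed with others
// map(p, out_of_range_va, pa, prot)        -> returns 1: outside prot_vm_range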
@@ -3,6 +3,27 @@
TSS tss[MAX_CPU];
SegDesc gdts[MAX_CPU][NR_SEG];
+void bootcpu_init() {
+#define MAGIC 0x5f504d5f
+  for (char *st = (char *)0xf0000; st != (char *)0xffffff; st ++) {
+    if (*(volatile uint32_t *)st == MAGIC) {
+      volatile MPConf *conf = ((volatile MPDesc *)st)->conf;
+      lapic = conf->lapicaddr;
+      for (volatile char *ptr = (char *)(conf + 1);
+           ptr < (char *)conf + conf->length; ptr += 8) {
+        if (*ptr == '\0') {
+          ptr += 12;
+          if (++ncpu > MAX_CPU) {
+            panic("cannot support > MAX_CPU processors");
+          }
+        }
+      }
+      return;
+    }
+  }
+  panic("seems not an x86-qemu machine");
+}
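The rewritten scan folds the old MP_PROC/MP_MAGIC constants into the loop: MAGIC is the ASCII signature "_MP_" read as a little-endian word, and a processor entry (type byte 0, hence the '\0' test) occupies 20 bytes where every other entry type occupies 8, which the ptr += 12 on top of the loop's ptr += 8 accounts for. Checking the signature arithmetic:

// '_'  'M'  'P'  '_'   laid out in memory, read as a little-endian uint32_t:
// 0x5f 0x4d 0x50 0x5f  ->  0x5f504d5f
_Static_assert(0x5f504d5f == (('_' << 0) | ('M' << 8) | ('P' << 16) | ('_' << 24)),
               "MAGIC spells _MP_");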
void percpu_initgdt() {
int cpu = _cpu();
SegDesc *gdt = gdts[cpu];
@@ -15,7 +36,7 @@ void percpu_initgdt() {
set_tr(KSEL(SEG_TSS));
}
-void thiscpu_setustk(uintptr_t ss0, uintptr_t esp0) {
+void thiscpu_setstk0(uintptr_t ss0, uintptr_t esp0) {
int cpu = _cpu();
tss[cpu].ss0 = ss0;
tss[cpu].esp0 = esp0;
@@ -35,28 +56,3 @@ void othercpu_halt() {
}
}
}
-#define MP_PROC 0x00
-#define MP_MAGIC 0x5f504d5f // _MP_
-void bootcpu_init() {
-  for (char *st = (char *)0xf0000; st != (char *)0xffffff; st ++) {
-    if (*(volatile uint32_t *)st == MP_MAGIC) {
-      volatile MPConf *conf = ((volatile MPDesc *)st)->conf;
-      lapic = conf->lapicaddr;
-      for (volatile char *ptr = (char *)(conf + 1);
-           ptr < (char *)conf + conf->length; ) {
-        if (*ptr == MP_PROC) {
-          ptr += 20;
-          if (++ncpu > MAX_CPU) {
-            panic("cannot support > MAX_CPU processors");
-          }
-        } else {
-          ptr += 8;
-        }
-      }
-      return;
-    }
-  }
-  panic("seems not an x86-qemu machine");
-}
@@ -5,7 +5,7 @@
void f() {
_intr_write(1);
-  if (_cpu() == 0) {
+  if (_cpu() == 1) {
for (int volatile i = 0; i < 10000000; i++) ;
assert(0);
}
......