提交 7b0d792c 编写于 作者: Richard Henderson

cputlb: Move ROM handling from I/O path to TLB path

It does not require going through the whole I/O path
in order to discard a write.
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
上级 6e050d41
...@@ -577,7 +577,8 @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, ...@@ -577,7 +577,8 @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
{ {
uintptr_t addr = tlb_entry->addr_write; uintptr_t addr = tlb_entry->addr_write;
if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) { if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
addr &= TARGET_PAGE_MASK; addr &= TARGET_PAGE_MASK;
addr += tlb_entry->addend; addr += tlb_entry->addend;
if ((addr - start) < length) { if ((addr - start) < length) {
...@@ -745,7 +746,6 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, ...@@ -745,7 +746,6 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
address |= TLB_MMIO; address |= TLB_MMIO;
addend = 0; addend = 0;
} else { } else {
/* TLB_MMIO for rom/romd handled below */
addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
} }
...@@ -822,16 +822,17 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, ...@@ -822,16 +822,17 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
tn.addr_write = -1; tn.addr_write = -1;
if (prot & PAGE_WRITE) { if (prot & PAGE_WRITE) {
if ((memory_region_is_ram(section->mr) && section->readonly) tn.addr_write = address;
|| memory_region_is_romd(section->mr)) { if (memory_region_is_romd(section->mr)) {
/* Write access calls the I/O callback. */ /* Use the MMIO path so that the device can switch states. */
tn.addr_write = address | TLB_MMIO; tn.addr_write |= TLB_MMIO;
} else if (memory_region_is_ram(section->mr) } else if (memory_region_is_ram(section->mr)) {
&& cpu_physical_memory_is_clean( if (section->readonly) {
memory_region_get_ram_addr(section->mr) + xlat)) { tn.addr_write |= TLB_DISCARD_WRITE;
tn.addr_write = address | TLB_NOTDIRTY; } else if (cpu_physical_memory_is_clean(
} else { memory_region_get_ram_addr(section->mr) + xlat)) {
tn.addr_write = address; tn.addr_write |= TLB_NOTDIRTY;
}
} }
if (prot & PAGE_WRITE_INV) { if (prot & PAGE_WRITE_INV) {
tn.addr_write |= TLB_INVALID_MASK; tn.addr_write |= TLB_INVALID_MASK;
...@@ -904,7 +905,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, ...@@ -904,7 +905,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
mr = section->mr; mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr; cpu->mem_io_pc = retaddr;
if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) { if (mr != &io_mem_notdirty && !cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr); cpu_io_recompile(cpu, retaddr);
} }
...@@ -945,7 +946,7 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, ...@@ -945,7 +946,7 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
mr = section->mr; mr = section->mr;
mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) { if (mr != &io_mem_notdirty && !cpu->can_do_io) {
cpu_io_recompile(cpu, retaddr); cpu_io_recompile(cpu, retaddr);
} }
cpu->mem_io_vaddr = addr; cpu->mem_io_vaddr = addr;
...@@ -1125,7 +1126,7 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size, ...@@ -1125,7 +1126,7 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
} }
/* Reject I/O access, or other required slow-path. */ /* Reject I/O access, or other required slow-path. */
if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP)) { if (tlb_addr & (TLB_NOTDIRTY | TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
return NULL; return NULL;
} }
...@@ -1617,6 +1618,11 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, ...@@ -1617,6 +1618,11 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
return; return;
} }
/* Ignore writes to ROM. */
if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
return;
}
haddr = (void *)((uintptr_t)addr + entry->addend); haddr = (void *)((uintptr_t)addr + entry->addend);
/* /*
......
...@@ -88,7 +88,7 @@ static MemoryRegion *system_io; ...@@ -88,7 +88,7 @@ static MemoryRegion *system_io;
AddressSpace address_space_io; AddressSpace address_space_io;
AddressSpace address_space_memory; AddressSpace address_space_memory;
MemoryRegion io_mem_rom, io_mem_notdirty; MemoryRegion io_mem_notdirty;
static MemoryRegion io_mem_unassigned; static MemoryRegion io_mem_unassigned;
#endif #endif
...@@ -192,7 +192,6 @@ typedef struct subpage_t { ...@@ -192,7 +192,6 @@ typedef struct subpage_t {
#define PHYS_SECTION_UNASSIGNED 0 #define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1 #define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
static void io_mem_init(void); static void io_mem_init(void);
static void memory_map_init(void); static void memory_map_init(void);
...@@ -1475,8 +1474,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu, ...@@ -1475,8 +1474,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
iotlb = memory_region_get_ram_addr(section->mr) + xlat; iotlb = memory_region_get_ram_addr(section->mr) + xlat;
if (!section->readonly) { if (!section->readonly) {
iotlb |= PHYS_SECTION_NOTDIRTY; iotlb |= PHYS_SECTION_NOTDIRTY;
} else {
iotlb |= PHYS_SECTION_ROM;
} }
} else { } else {
AddressSpaceDispatch *d; AddressSpaceDispatch *d;
...@@ -3002,38 +2999,6 @@ static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr) ...@@ -3002,38 +2999,6 @@ static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
return phys_section_add(map, &section); return phys_section_add(map, &section);
} }
/*
 * Write callback for the ROM memory region.
 *
 * All guest writes that reach this region are silently discarded;
 * nothing is stored and no fault is raised.  The opaque pointer,
 * address, value and size are all intentionally unused.
 */
static void readonly_mem_write(void *opaque, hwaddr addr,
                               uint64_t val, unsigned size)
{
    /* Ignore any write to ROM. */
}
/*
 * Access filter for the ROM memory region.
 *
 * Accept only write accesses: per the comment on readonly_mem_ops,
 * reads are special-cased elsewhere to access the underlying host
 * RAM directly and must not be routed through this region.
 */
static bool readonly_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write,
                                 MemTxAttrs attrs)
{
    if (is_write) {
        return true;
    }
    return false;
}
/* This will only be used for writes, because reads are special cased
 * to directly access the underlying host ram.
 */
/*
 * MemoryRegionOps backing the ROM I/O region: discards writes
 * (readonly_mem_write) and only accepts write accesses
 * (readonly_mem_accepts).  No .read callback is provided because the
 * read path never dispatches here.
 *
 * NOTE(review): ".valid.accepts" is designated before the full
 * ".valid = { ... }" initializer below.  With overlapping designated
 * initializers, a later initializer for the whole subobject may reset
 * fields the earlier one set, depending on compiler semantics --
 * confirm the ordering is intentional before reformatting.
 */
static const MemoryRegionOps readonly_mem_ops = {
    .write = readonly_mem_write,
    .valid.accepts = readonly_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
};
MemoryRegionSection *iotlb_to_section(CPUState *cpu, MemoryRegionSection *iotlb_to_section(CPUState *cpu,
hwaddr index, MemTxAttrs attrs) hwaddr index, MemTxAttrs attrs)
{ {
...@@ -3047,8 +3012,6 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu, ...@@ -3047,8 +3012,6 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
static void io_mem_init(void) static void io_mem_init(void)
{ {
memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
NULL, NULL, UINT64_MAX);
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
NULL, UINT64_MAX); NULL, UINT64_MAX);
...@@ -3069,8 +3032,6 @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) ...@@ -3069,8 +3032,6 @@ AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
assert(n == PHYS_SECTION_UNASSIGNED); assert(n == PHYS_SECTION_UNASSIGNED);
n = dummy_section(&d->map, fv, &io_mem_notdirty); n = dummy_section(&d->map, fv, &io_mem_notdirty);
assert(n == PHYS_SECTION_NOTDIRTY); assert(n == PHYS_SECTION_NOTDIRTY);
n = dummy_section(&d->map, fv, &io_mem_rom);
assert(n == PHYS_SECTION_ROM);
d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
......
...@@ -337,12 +337,15 @@ CPUArchState *cpu_copy(CPUArchState *env); ...@@ -337,12 +337,15 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4)) #define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if TLB entry requires byte swap. */ /* Set if TLB entry requires byte swap. */
#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5)) #define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
/* Set if TLB entry writes ignored. */
#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))
/* Use this mask to check interception with an alignment mask /* Use this mask to check interception with an alignment mask
* in a TCG backend. * in a TCG backend.
*/ */
#define TLB_FLAGS_MASK \ #define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO | TLB_WATCHPOINT | TLB_BSWAP) (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
| TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
/** /**
* tlb_hit_page: return true if page aligned @addr is a hit against the * tlb_hit_page: return true if page aligned @addr is a hit against the
......
...@@ -100,7 +100,6 @@ void qemu_flush_coalesced_mmio_buffer(void); ...@@ -100,7 +100,6 @@ void qemu_flush_coalesced_mmio_buffer(void);
void cpu_flush_icache_range(hwaddr start, hwaddr len); void cpu_flush_icache_range(hwaddr start, hwaddr len);
extern struct MemoryRegion io_mem_rom;
extern struct MemoryRegion io_mem_notdirty; extern struct MemoryRegion io_mem_notdirty;
typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque); typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册