Commit c31ca59e authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] fix show_mem for VIRTUAL_MEM_MAP+FLATMEM
  [IA64] align high endpoint of VIRTUAL_MEM_MAP
  [PATCH] Fix RAID5 + IA64 compile
  [IA64] Don't alloc empty frame in ia64_switch_mode_phys
  [IA64] Do not assume output registers are reserved.
  [IA64] add platform check to snsc driver init
  [IA64] sparse cleanups
  [IA64] Fix breakage in simscsi.c
  [IA64] Format /proc/pal/*/version_info correctly
@@ -151,7 +151,7 @@ static void
 simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
 {
 	int list_len = sc->use_sg;
-	struct scatterlist *sl = (struct scatterlist *)sc->buffer;
+	struct scatterlist *sl = (struct scatterlist *)sc->request_buffer;
 	struct disk_stat stat;
 	struct disk_req req;
@@ -244,7 +244,7 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
 	if (scatterlen == 0)
 		memcpy(sc->request_buffer, buf, len);
-	else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+	else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) {
 		unsigned thislen = min(len, slp->length);
 		memcpy(page_address(slp->page) + slp->offset, buf, thislen);
...
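The simscsi breakage appears to come from the 2.6.18 SCSI mid-layer dropping scsi_cmnd->buffer: when ->use_sg is nonzero, ->request_buffer holds the scatterlist pointer instead. A minimal sketch of the access pattern the fix restores (hypothetical helper name, kernel context assumed, not buildable stand-alone):

	static void copy_to_cmnd(struct scsi_cmnd *sc, char *buf, unsigned len)
	{
		int n = sc->use_sg;
		struct scatterlist *slp;

		if (n == 0) {			/* flat buffer, no scatter-gather */
			memcpy(sc->request_buffer, buf, len);
			return;
		}
		/* with use_sg != 0, request_buffer points at the scatterlist */
		for (slp = (struct scatterlist *)sc->request_buffer;
		     n-- > 0 && len > 0; slp++) {
			unsigned thislen = min(len, slp->length);

			/* each entry names a page + offset, not a plain pointer */
			memcpy(page_address(slp->page) + slp->offset, buf, thislen);
			buf += thislen;
			len -= thislen;
		}
	}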
@@ -632,7 +632,7 @@ kern_memory_descriptor (unsigned long phys_addr)
 		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
 			 return md;
 	}
-	return 0;
+	return NULL;
 }

 static efi_memory_desc_t *
@@ -652,7 +652,7 @@ efi_memory_descriptor (unsigned long phys_addr)
 		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
 			 return md;
 	}
-	return 0;
+	return NULL;
 }

 u32
@@ -923,7 +923,7 @@ find_memmap_space (void)
 void
 efi_memmap_init(unsigned long *s, unsigned long *e)
 {
-	struct kern_memdesc *k, *prev = 0;
+	struct kern_memdesc *k, *prev = NULL;
 	u64	contig_low=0, contig_high=0;
 	u64	as, ae, lim;
 	void *efi_map_start, *efi_map_end, *p, *q;
...
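These return-value changes are part of the sparse cleanups: the functions return pointers, and returning the integer literal 0 draws sparse's "Using plain integer as NULL pointer" warning. A standalone illustration (hypothetical helper):

	#include <stddef.h>

	/* A pointer-returning function should say NULL, not 0; the generated
	 * code is identical, but NULL states the intent and keeps sparse quiet. */
	static const char *find_char(const char *s, char c)
	{
		for (; *s; s++)
			if (*s == c)
				return s;
		return NULL;
	}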
@@ -853,7 +853,6 @@ END(__ia64_init_fpu)
  */
 GLOBAL_ENTRY(ia64_switch_mode_phys)
 {
-	alloc r2=ar.pfs,0,0,0,0
 	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
 	mov r15=ip
 }
@@ -902,7 +901,6 @@ END(ia64_switch_mode_phys)
  */
 GLOBAL_ENTRY(ia64_switch_mode_virt)
 {
-	alloc r2=ar.pfs,0,0,0,0
 	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
 	mov r15=ip
 }
...
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(__udivdi3);
 EXPORT_SYMBOL(__moddi3);
 EXPORT_SYMBOL(__umoddi3);

-#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
+#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
 extern void xor_ia64_2(void);
 extern void xor_ia64_3(void);
 extern void xor_ia64_4(void);
...
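CONFIG_MD_RAID5 went away when the raid5 and raid6 drivers merged into raid456, so the guard must test the new symbol. Kconfig defines CONFIG_FOO for a built-in and CONFIG_FOO_MODULE for a module, hence the two-way test. Later kernels wrap the same check in IS_ENABLED() from <linux/kconfig.h>; a sketch of the equivalent modern guard (not available in 2.6.18):

	#if IS_ENABLED(CONFIG_MD_RAID456)	/* true for =y and =m alike */
	EXPORT_SYMBOL(xor_ia64_2);
	EXPORT_SYMBOL(xor_ia64_3);
	EXPORT_SYMBOL(xor_ia64_4);
	#endif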
@@ -217,12 +217,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	.body
 	;;
 	ld8 loc2 = [loc2]		// loc2 <- entry point
-	mov out0 = in0			// first argument
-	mov out1 = in1			// copy arg2
-	mov out2 = in2			// copy arg3
-	mov out3 = in3			// copy arg3
-	;;
 	mov loc3 = psr			// save psr
 	;;
 	mov loc4=ar.rsc			// save RSE configuration
 	dep.z loc2=loc2,0,61		// convert pal entry point to physical
@@ -236,18 +231,23 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	;;
 	andcm r16=loc3,r16		// removes bits to clear from psr
 	br.call.sptk.many rp=ia64_switch_mode_phys
-.ret6:
+	mov out0 = in0			// first argument
+	mov out1 = in1			// copy arg2
+	mov out2 = in2			// copy arg3
+	mov out3 = in3			// copy arg3
 	mov loc5 = r19
 	mov loc6 = r20
 	br.call.sptk.many rp=b7		// now make the call
-.ret7:
 	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
 	mov r16=loc3			// r16= original psr
 	mov r19=loc5
 	mov r20=loc6
 	br.call.sptk.many rp=ia64_switch_mode_virt	// return to virtual mode
-.ret8:	mov psr.l = loc3		// restore init PSR
+	mov psr.l = loc3		// restore init PSR
 	mov ar.pfs = loc1
 	mov rp = loc0
 	;;
...
@@ -566,29 +566,23 @@ version_info(char *page)
 	pal_version_u_t min_ver, cur_ver;
 	char *p = page;

-	/* The PAL_VERSION call is advertised as being able to support
-	 * both physical and virtual mode calls. This seems to be a documentation
-	 * bug rather than firmware bug. In fact, it does only support physical mode.
-	 * So now the code reflects this fact and the pal_version() has been updated
-	 * accordingly.
-	 */
-	if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;
+	if (ia64_pal_version(&min_ver, &cur_ver) != 0)
+		return 0;

 	p += sprintf(p,
 		     "PAL_vendor : 0x%02x (min=0x%02x)\n"
-		     "PAL_A      : %x.%x.%x (min=%x.%x.%x)\n"
-		     "PAL_B      : %x.%x.%x (min=%x.%x.%x)\n",
-		     cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,
-		     cur_ver.pal_version_s.pv_pal_a_model>>4,
-		     cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
-		     min_ver.pal_version_s.pv_pal_a_model>>4,
-		     min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,
-		     cur_ver.pal_version_s.pv_pal_b_model>>4,
-		     cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
-		     min_ver.pal_version_s.pv_pal_b_model>>4,
-		     min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
+		     "PAL_A      : %02x.%02x (min=%02x.%02x)\n"
+		     "PAL_B      : %02x.%02x (min=%02x.%02x)\n",
+		     cur_ver.pal_version_s.pv_pal_vendor,
+		     min_ver.pal_version_s.pv_pal_vendor,
+		     cur_ver.pal_version_s.pv_pal_a_model,
+		     cur_ver.pal_version_s.pv_pal_a_rev,
+		     min_ver.pal_version_s.pv_pal_a_model,
+		     min_ver.pal_version_s.pv_pal_a_rev,
+		     cur_ver.pal_version_s.pv_pal_b_model,
+		     cur_ver.pal_version_s.pv_pal_b_rev,
+		     min_ver.pal_version_s.pv_pal_b_model,
+		     min_ver.pal_version_s.pv_pal_b_rev);

 	return p - page;
 }
...
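The old format split each PAL model byte into nibbles (%x.%x.%x over model>>4, model&0xf, rev); the fix treats model and revision as the plain bytes they are and prints each zero-padded. A standalone illustration with made-up values:

	#include <stdio.h>

	int main(void)
	{
		unsigned model = 0x02, rev = 0x1a;	/* hypothetical PAL_B bytes */

		printf("old: %x.%x.%x\n", model >> 4, model & 0xf, rev);	/* 0.2.1a */
		printf("new: %02x.%02x\n", model, rev);				/* 02.1a */
		return 0;
	}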
@@ -14,7 +14,7 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
 lib-$(CONFIG_ITANIUM)	+= copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)	+= carta_random.o
-lib-$(CONFIG_MD_RAID5)	+= xor.o
+lib-$(CONFIG_MD_RAID456)	+= xor.o

 AFLAGS___divdi3.o	=
 AFLAGS___udivdi3.o	= -DUNSIGNED
...
@@ -27,6 +27,7 @@
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 static unsigned long num_dma_physpages;
+static unsigned long max_gap;
 #endif

 /**
@@ -45,9 +46,15 @@ show_mem (void)

 	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
+	for (i = 0; i < max_mapnr; i++) {
+		if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+			if (max_gap < LARGE_GAP)
+				continue;
+			i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
 			continue;
+		}
 		total++;
 		if (PageReserved(mem_map+i))
 			reserved++;
@@ -234,7 +241,6 @@ paging_init (void)
 	unsigned long zones_size[MAX_NR_ZONES];
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 	unsigned long zholes_size[MAX_NR_ZONES];
-	unsigned long max_gap;
 #endif

 	/* initialize mem_map[] */
@@ -266,7 +272,6 @@ paging_init (void)
 		}
 	}

-	max_gap = 0;
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
@@ -277,7 +282,8 @@ paging_init (void)

 		/* allocate virtual_mem_map */

-		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+			sizeof(struct page));
 		vmalloc_end -= map_size;
 		vmem_map = (struct page *) vmalloc_end;
 		efi_memmap_walk(create_mem_map_page_table, NULL);
...
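The map_size change is the "align high endpoint of VIRTUAL_MEM_MAP" fix: page-allocator code may scan mem_map in MAX_ORDER-sized blocks, so the map is sized for max_low_pfn rounded up to MAX_ORDER_NR_PAGES rather than for max_low_pfn exactly. ALIGN rounds up to a power-of-two multiple; a standalone check of the arithmetic (1024 assumes the default MAX_ORDER of 11):

	#include <stdio.h>

	/* round x up to a multiple of a (a must be a power of two) */
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		unsigned long max_low_pfn = 1000003;		/* hypothetical */
		unsigned long max_order_nr_pages = 1024;	/* 1 << (MAX_ORDER - 1) */

		/* prints 1000448, the next multiple of 1024 */
		printf("%lu\n", ALIGN(max_low_pfn, max_order_nr_pages));
		return 0;
	}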
@@ -534,68 +534,6 @@ void __cpuinit *per_cpu_init(void)
 }
 #endif /* CONFIG_SMP */

-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-	unsigned long end_address, hole_next_pfn;
-	unsigned long stop_address;
-
-	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
-	end_address = PAGE_ALIGN(end_address);
-
-	stop_address = (unsigned long) &vmem_map[
-		pgdat->node_start_pfn + pgdat->node_spanned_pages];
-
-	do {
-		pgd_t *pgd;
-		pud_t *pud;
-		pmd_t *pmd;
-		pte_t *pte;
-
-		pgd = pgd_offset_k(end_address);
-		if (pgd_none(*pgd)) {
-			end_address += PGDIR_SIZE;
-			continue;
-		}
-
-		pud = pud_offset(pgd, end_address);
-		if (pud_none(*pud)) {
-			end_address += PUD_SIZE;
-			continue;
-		}
-
-		pmd = pmd_offset(pud, end_address);
-		if (pmd_none(*pmd)) {
-			end_address += PMD_SIZE;
-			continue;
-		}
-
-		pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
-		if (pte_none(*pte)) {
-			end_address += PAGE_SIZE;
-			pte++;
-			if ((end_address < stop_address) &&
-			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
-				goto retry_pte;
-			continue;
-		}
-		/* Found next valid vmem_map page */
-		break;
-	} while (end_address < stop_address);
-
-	end_address = min(end_address, stop_address);
-	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
-	hole_next_pfn = end_address / sizeof(struct page);
-	return hole_next_pfn - pgdat->node_start_pfn;
-}
-#else
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-	return i + 1;
-}
-#endif
-
 /**
  * show_mem - give short summary of memory stats
  *
@@ -625,7 +563,8 @@ void show_mem(void)
 			if (pfn_valid(pgdat->node_start_pfn + i))
 				page = pfn_to_page(pgdat->node_start_pfn + i);
 			else {
-				i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
+				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+					i) - 1;
 				continue;
 			}
 			if (PageReserved(page))
@@ -751,7 +690,8 @@ void __init paging_init(void)
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+		sizeof(struct page));
 	vmem_map = (struct page *) vmalloc_end;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
...
@@ -415,6 +415,61 @@ ia64_mmu_init (void *my_cpu_data)
 }

 #ifdef CONFIG_VIRTUAL_MEM_MAP
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+	unsigned long end_address, hole_next_pfn;
+	unsigned long stop_address;
+	pg_data_t *pgdat = NODE_DATA(node);
+
+	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+	end_address = PAGE_ALIGN(end_address);
+
+	stop_address = (unsigned long) &vmem_map[
+		pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+	do {
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pgd = pgd_offset_k(end_address);
+		if (pgd_none(*pgd)) {
+			end_address += PGDIR_SIZE;
+			continue;
+		}
+
+		pud = pud_offset(pgd, end_address);
+		if (pud_none(*pud)) {
+			end_address += PUD_SIZE;
+			continue;
+		}
+
+		pmd = pmd_offset(pud, end_address);
+		if (pmd_none(*pmd)) {
+			end_address += PMD_SIZE;
+			continue;
+		}
+
+		pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+		if (pte_none(*pte)) {
+			end_address += PAGE_SIZE;
+			pte++;
+			if ((end_address < stop_address) &&
+			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+				goto retry_pte;
+			continue;
+		}
+		/* Found next valid vmem_map page */
+		break;
+	} while (end_address < stop_address);
+
+	end_address = min(end_address, stop_address);
+	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+	hole_next_pfn = end_address / sizeof(struct page);
+	return hole_next_pfn - pgdat->node_start_pfn;
+}
+
 int __init
 create_mem_map_page_table (u64 start, u64 end, void *arg)
...
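The per-node helper above moves here so flat-memory (contig.c) and discontiguous (discontig.c) builds can share it. It walks the kernel page tables that back vmem_map (pgd, then pud, pmd, pte), skipping a whole PGDIR/PUD/PMD/page-sized stride whenever a level is empty, then converts the first mapped vmem_map address back into a pfn by dividing the offset by sizeof(struct page). A standalone sketch of that final conversion (stand-in struct page; the real one is larger, but the arithmetic is the same):

	#include <stdio.h>

	struct page { unsigned long flags, private; void *mapping; };	/* stand-in */

	int main(void)
	{
		struct page vmem_map[16];
		unsigned long addr = (unsigned long)&vmem_map[7];
		unsigned long pfn = (addr - (unsigned long)vmem_map) / sizeof(struct page);

		printf("pfn = %lu\n", pfn);	/* 7 */
		return 0;
	}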
@@ -32,7 +32,7 @@ ioremap (unsigned long offset, unsigned long size)
 	 */
 	attr = kern_mem_attribute(offset, size);
 	if (attr & EFI_MEMORY_WB)
-		return phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(offset);
 	else if (attr & EFI_MEMORY_UC)
 		return __ioremap(offset, size);
@@ -43,7 +43,7 @@ ioremap (unsigned long offset, unsigned long size)
 	gran_base = GRANULEROUNDDOWN(offset);
 	gran_size = GRANULEROUNDUP(offset + size) - gran_base;
 	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
-		return phys_to_virt(offset);
+		return (void __iomem *) phys_to_virt(offset);

 	return __ioremap(offset, size);
 }
@@ -53,7 +53,7 @@ void __iomem *
 ioremap_nocache (unsigned long offset, unsigned long size)
 {
 	if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
-		return 0;
+		return NULL;

 	return __ioremap(offset, size);
 }
...
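The casts are more sparse fallout: phys_to_virt() returns a plain void *, while ioremap() is declared to return void __iomem *, which sparse treats as a distinct address space. A sketch of how the kernel's compiler headers define the annotation, empty for real compilers and an attribute when sparse (__CHECKER__) runs:

	#ifdef __CHECKER__
	# define __iomem	__attribute__((noderef, address_space(2)))
	#else
	# define __iomem
	#endif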
@@ -480,7 +480,7 @@ xpc_activating(void *__partid)
 	partid_t partid = (u64) __partid;
 	struct xpc_partition *part = &xpc_partitions[partid];
 	unsigned long irq_flags;
-	struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 };
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 	int ret;
...
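The "label:" initializer is an old GNU extension; C99 designated initializers express the same thing portably and are what sparse and newer compilers expect. A standalone before/after (stand-in type):

	#include <stdio.h>

	struct params { int sched_priority; };	/* stand-in for sched_param */

	int main(void)
	{
		/* GNU-only form:  struct params p = { sched_priority: 99 }; */
		struct params p = { .sched_priority = 99 };	/* ISO C99 */

		printf("%d\n", p.sched_priority);
		return 0;
	}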
@@ -74,7 +74,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void *mmr_addr)
 		else
 			mmr_war_offset = 0x158;

-		readq_relaxed((void *)(mmr_base + mmr_war_offset));
+		readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
 	}
 }
@@ -92,8 +92,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)

 	if (mmr_offset < 0x45000) {
 		if (mmr_offset == 0x100)
-			readq_relaxed((void *)(mmr_base + 0x38));
-		readq_relaxed((void *)(mmr_base + 0xb050));
+			readq_relaxed((void __iomem *)(mmr_base + 0x38));
+		readq_relaxed((void __iomem *)(mmr_base + 0xb050));
 	}
 }
...
@@ -374,7 +374,12 @@ scdrv_init(void)
 	struct sysctl_data_s *scd;
 	void *salbuf;
 	dev_t first_dev, dev;
-	nasid_t event_nasid = ia64_sn_get_console_nasid();
+	nasid_t event_nasid;
+
+	if (!ia64_platform_is("sn2"))
+		return -ENODEV;
+
+	event_nasid = ia64_sn_get_console_nasid();

 	if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
 				SYSCTL_BASENAME) < 0) {
...
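A generic IA64 kernel can boot on several platforms, so an SN-only driver has to bail out before it touches SN-specific SAL services; returning -ENODEV from the init function is the usual way to decline. A sketch of the pattern (hypothetical driver name, kernel context assumed):

	static int __init mydrv_init(void)
	{
		if (!ia64_platform_is("sn2"))
			return -ENODEV;	/* not our hardware: decline quietly */

		/* SN-specific firmware calls are safe from here on */
		return 0;
	}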
@@ -56,6 +56,11 @@ extern void efi_memmap_init(unsigned long *, unsigned long *);
   extern struct page *vmem_map;
   extern int find_largest_hole (u64 start, u64 end, void *arg);
   extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
+  extern int vmemmap_find_next_valid_pfn(int, int);
+#else
+static inline int vmemmap_find_next_valid_pfn(int node, int i)
+{
+	return i + 1;
+}
 #endif

 #endif /* meminit_h */
@@ -1433,7 +1433,12 @@ typedef union pal_version_u {
 } pal_version_u_t;

-/* Return PAL version information */
+/*
+ * Return PAL version information.  While the documentation states that
+ * PAL_VERSION can be called in either physical or virtual mode, some
+ * implementations only allow physical calls.  We don't call it very often,
+ * so the overhead isn't worth eliminating.
+ */
 static inline s64
 ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
 {
...
@@ -1124,8 +1124,8 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
 #define XPC_GET_IPI_FLAGS(_amo, _c)	((u8) (((_amo) >> ((_c) * 8)) & 0xff))
 #define XPC_SET_IPI_FLAGS(_amo, _c, _f)	(_amo) |= ((u64) (_f) << ((_c) * 8))

-#define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
-#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010)
+#define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f))
+#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & __IA64_UL_CONST(0x1010101010101010))

 static inline void
...
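Without a suffix, sparse complains that a constant like 0x0f0f0f0f0f0f0f0f "is so big it is long"; the macro makes the unsigned-long type explicit in C while leaving the literal bare for the assembler, where a UL suffix would be a syntax error. The KERNEL_START hunk below gets the same treatment. A sketch of the helper as the ia64 asm/types.h defines it (modulo whitespace):

	#ifdef __ASSEMBLY__
	# define __IA64_UL_CONST(x)	x	/* assembler: no suffix allowed */
	#else
	# define __IA64_UL_CONST(x)	x##UL	/* C: force unsigned long */
	#endif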
@@ -24,7 +24,7 @@
  *		0xa000000000000000+2*PERCPU_PAGE_SIZE
  *		- 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
  */
-#define KERNEL_START		 (GATE_ADDR+0x100000000)
+#define KERNEL_START		 (GATE_ADDR+__IA64_UL_CONST(0x100000000))
 #define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)

 #ifndef __ASSEMBLY__
...