Commit a1c28b75 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
 "Changes included in this pull request:

   - revert pxa2xx-flash back to using ioremap_cached() and switch
     memremap() to use arch_memremap_wb() (see the sketch below)

   - remove pci=firmware command line argument handling

   - remove unnecessary arm_dma_set_mask() implementation, the generic
     implementation will do for ARM

   - removal of the ARM kallsyms "hack" to work around mode switching
     veneers and vectors located below PAGE_OFFSET

   - tidy up build system output a little

   - add L2 cache power management DT bindings

   - remove duplicated local_irq_disable() in reboot paths

   - handle AMBA primecell devices better at registration time with PM
     domains (needed for Samsung SoCs)

   - ARM specific preparation to support Keystone II kexec"
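
A hypothetical usage fragment illustrating the first item above (the
addresses and size are invented; the real changes are in the hunks below):

    /* RAM-backed mapping: memremap() returns a plain pointer and, with
     * this series, uses the arch writeback hook on ARM. */
    void *buf = memremap(0x40000000, SZ_1M, MEMREMAP_WB);

    /* Cached __iomem mapping: the reintroduced ioremap_cached(), kept
     * only for the pxa2xx-flash MTD driver. */
    void __iomem *flash = ioremap_cached(0x48000000, SZ_1M);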

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8567/1: cache-uniphier: activate ways for secondary CPUs
  ARM: 8570/2: Documentation: devicetree: Add PL310 PM bindings
  ARM: 8569/1: pl2x0: Add OF control of cache power management
  ARM: 8568/1: reboot: remove duplicated local_irq_disable()
  ARM: 8566/1: drivers: amba: properly handle devices with power domains
  ARM: provide arm_has_idmap_alias() helper
  ARM: kexec: remove 512MB restriction on kexec crashdump
  ARM: provide improved virt_to_idmap() functionality
  ARM: kexec: fix crashkernel= handling
  ARM: 8557/1: specify install, zinstall, and uinstall as PHONY targets
  ARM: 8562/1: suppress "include/generated/mach-types.h is up to date."
  ARM: 8553/1: kallsyms: remove --page-offset command line option
  ARM: 8552/1: kallsyms: remove special lower address limit for CONFIG_ARM
  ARM: 8555/1: kallsyms: ignore ARM mode switching veneers
  ARM: 8548/1: dma-mapping: remove arm_dma_set_mask()
  ARM: 8554/1: kernel: pci: remove pci=firmware command line parameter handling
  ARM: memremap: implement arch_memremap_wb()
  memremap: add arch specific hook for MEMREMAP_WB mappings
  mtd: pxa2xx-flash: switch back from memremap to ioremap_cached
  ARM: reintroduce ioremap_cached() for creating cached I/O mappings
@@ -84,6 +84,12 @@ Optional properties:
- prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
<1> (forcibly enable), property absent (retain settings set by
firmware)
- arm,dynamic-clock-gating : L2 dynamic clock gating. Value: <0> (forcibly
disable), <1> (forcibly enable), property absent (OS specific behavior,
preferably retain firmware settings)
- arm,standby-mode: L2 standby mode enable. Value: <0> (forcibly disable),
<1> (forcibly enable), property absent (OS specific behavior,
preferably retain firmware settings)
Example:
@@ -263,12 +263,6 @@ The syntax is:
crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset]
range=start-[end]
Please note, on arm, the offset is required.
crashkernel=<range1>:<size1>[,<range2>:<size2>,...]@offset
range=start-[end]
'start' is inclusive and 'end' is exclusive.
For example:
crashkernel=512M-2G:64M,2G-:128M
@@ -307,10 +301,9 @@ Boot into System Kernel
on the memory consumption of the kdump system. In general this is not
dependent on the memory size of the production system.
On arm, use "crashkernel=Y@X". Note that the start address of the kernel
will be aligned to 128MiB (0x08000000), so if the start address is not
aligned, any space below the alignment point may be overwritten by the
dump-capture kernel, which means the vmcore may not be as precise as
expected.
On arm, the use of "crashkernel=Y@X" is no longer necessary; the
kernel will automatically locate the crash kernel image within the
first 512MB of RAM if X is not given.
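
For example (the size is illustrative):

	crashkernel=64M

lets the kernel choose a suitable base address by itself.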
Load the Dump-capture Kernel
@@ -2959,11 +2959,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
for broken drivers that don't call it.
skip_isa_align [X86] do not align io start addr, so can
handle more pci cards
firmware [ARM] Do not re-enumerate the bus but instead
just use the configuration from the
bootloader. This is currently used on
IXP2000 systems where the bus has to be
configured a certain way for adjunct CPUs.
noearly [X86] Don't do any early type 1 scanning.
This might help on some broken boards which
machine check when some devices' config space
@@ -88,7 +88,7 @@ $(obj)/bootpImage: $(obj)/bootp/bootp FORCE
$(call if_changed,objcopy)
@$(kecho) ' Kernel: $@ is ready'
PHONY += initrd
PHONY += initrd install zinstall uinstall
initrd:
@test "$(INITRD_PHYS)" != "" || \
(echo This machine does not support INITRD; exit -1)
@@ -162,8 +162,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
static inline void dma_mark_clean(void *addr, size_t size) { }
extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
/**
* arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -392,9 +392,18 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size);
#define ioremap ioremap
#define ioremap_nocache ioremap
/*
* Do not use ioremap_cache for mapping memory. Use memremap instead.
*/
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
#define ioremap_cache ioremap_cache
/*
* Do not use ioremap_cached in new code. Provided for the benefit of
* the pxa2xx-flash MTD driver only.
*/
void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size);
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc
@@ -402,6 +411,9 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
void iounmap(volatile void __iomem *iomem_cookie);
#define iounmap iounmap
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
#define arch_memremap_wb arch_memremap_wb
/*
* io{read,write}{16,32}be() macros
*/
@@ -288,19 +288,43 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
extern unsigned long (*arch_virt_to_idmap)(unsigned long x);
extern long long arch_phys_to_idmap_offset;
/*
* These are for systems that have a hardware interconnect supported alias of
* physical memory for idmap purposes. Most cases should leave these
* These are for systems that have a hardware interconnect supported alias
* of physical memory for idmap purposes. Most cases should leave these
* untouched. Note: this can only return addresses less than 4GiB.
*/
static inline bool arm_has_idmap_alias(void)
{
return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
}
#define IDMAP_INVALID_ADDR ((u32)~0)
static inline unsigned long phys_to_idmap(phys_addr_t addr)
{
if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
addr += arch_phys_to_idmap_offset;
if (addr > (u32)~0)
addr = IDMAP_INVALID_ADDR;
}
return addr;
}
static inline phys_addr_t idmap_to_phys(unsigned long idmap)
{
phys_addr_t addr = idmap;
if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
addr -= arch_phys_to_idmap_offset;
return addr;
}
static inline unsigned long __virt_to_idmap(unsigned long x)
{
if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
return arch_virt_to_idmap(x);
else
return __virt_to_phys(x);
return phys_to_idmap(__virt_to_phys(x));
}
#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
@@ -550,9 +550,6 @@ char * __init pcibios_setup(char *str)
if (!strcmp(str, "debug")) {
debug_pci = 1;
return NULL;
} else if (!strcmp(str, "firmware")) {
pci_add_flags(PCI_PROBE_ONLY);
return NULL;
}
return str;
}
@@ -104,8 +104,6 @@ void machine_halt(void)
{
local_irq_disable();
smp_send_stop();
local_irq_disable();
while (1);
}
@@ -150,6 +148,5 @@ void machine_restart(char *cmd)
/* Whoops - the platform was unable to reboot. Tell the user! */
printk("Reboot failed -- System halted\n");
local_irq_disable();
while (1);
}
@@ -941,6 +941,12 @@ static int __init init_machine_late(void)
late_initcall(init_machine_late);
#ifdef CONFIG_KEXEC
/*
* The crash region must be aligned to 128MB to avoid
* zImage relocating below the reserved region.
*/
#define CRASH_ALIGN (128 << 20)
static inline unsigned long long get_total_mem(void)
{
unsigned long total;
@@ -968,6 +974,26 @@ static void __init reserve_crashkernel(void)
if (ret)
return;
if (crash_base <= 0) {
unsigned long long crash_max = idmap_to_phys((u32)~0);
crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
crash_size, CRASH_ALIGN);
if (!crash_base) {
pr_err("crashkernel reservation failed - No suitable area found.\n");
return;
}
} else {
unsigned long long start;
start = memblock_find_in_range(crash_base,
crash_base + crash_size,
crash_size, SECTION_SIZE);
if (start != crash_base) {
pr_err("crashkernel reservation failed - memory is in use.\n");
return;
}
}
ret = memblock_reserve(crash_base, crash_size);
if (ret < 0) {
pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
@@ -63,11 +63,6 @@ static void __init keystone_init(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static unsigned long keystone_virt_to_idmap(unsigned long x)
{
return (phys_addr_t)(x) - CONFIG_PAGE_OFFSET + KEYSTONE_LOW_PHYS_START;
}
static long long __init keystone_pv_fixup(void)
{
long long offset;
@@ -91,7 +86,7 @@ static long long __init keystone_pv_fixup(void)
offset = KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START;
/* Populate the arch idmap hook */
arch_virt_to_idmap = keystone_virt_to_idmap;
arch_phys_to_idmap_offset = -offset;
return offset;
}
@@ -647,11 +647,6 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
}
/* r3p0 or later has power control register */
if (rev >= L310_CACHE_ID_RTL_R3P0)
l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
L310_STNDBY_MODE_EN;
/*
* Always enable non-secure access to the lockdown registers -
* we write to them as part of the L2C enable sequence so they
@@ -1141,6 +1136,7 @@ static void __init l2c310_of_parse(const struct device_node *np,
u32 filter[2] = { 0, 0 };
u32 assoc;
u32 prefetch;
u32 power;
u32 val;
int ret;
@@ -1271,6 +1267,26 @@ static void __init l2c310_of_parse(const struct device_node *np,
}
l2x0_saved_regs.prefetch_ctrl = prefetch;
power = l2x0_saved_regs.pwr_ctrl |
L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;
ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
if (!ret) {
if (!val)
power &= ~L310_DYNAMIC_CLK_GATING_EN;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
}
ret = of_property_read_u32(np, "arm,standby-mode", &val);
if (!ret) {
if (!val)
power &= ~L310_STNDBY_MODE_EN;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
}
l2x0_saved_regs.pwr_ctrl = power;
}
static const struct l2c_init_data of_l2c310_data __initconst = {
@@ -96,6 +96,7 @@ struct uniphier_cache_data {
void __iomem *ctrl_base;
void __iomem *rev_base;
void __iomem *op_base;
void __iomem *way_ctrl_base;
u32 way_present_mask;
u32 way_locked_mask;
u32 nsets;
@@ -256,10 +257,13 @@ static void __init __uniphier_cache_set_locked_ways(
struct uniphier_cache_data *data,
u32 way_mask)
{
unsigned int cpu;
data->way_locked_mask = way_mask & data->way_present_mask;
writel_relaxed(~data->way_locked_mask & data->way_present_mask,
data->ctrl_base + UNIPHIER_SSCLPDAWCR);
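/* enable the non-locked ways in each possible CPU's way control register */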
for_each_possible_cpu(cpu)
writel_relaxed(~data->way_locked_mask & data->way_present_mask,
data->way_ctrl_base + 4 * cpu);
}
static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
@@ -459,6 +463,8 @@ static int __init __uniphier_cache_init(struct device_node *np,
goto err;
}
data->way_ctrl_base = data->ctrl_base + 0xc00;
if (*cache_level == 2) {
u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
/*
@@ -467,6 +473,22 @@ static int __init __uniphier_cache_init(struct device_node *np,
*/
if (revision <= 0x16)
data->range_op_max_size = (u32)1 << 22;
/*
* Unfortunately, the offset address of active way control base
* varies from SoC to SoC.
*/
switch (revision) {
case 0x11: /* sLD3 */
data->way_ctrl_base = data->ctrl_base + 0x870;
break;
case 0x12: /* LD4 */
case 0x16: /* sld8 */
data->way_ctrl_base = data->ctrl_base + 0x840;
break;
default:
break;
}
}
data->range_op_max_size -= data->line_size;
@@ -190,7 +190,6 @@ struct dma_map_ops arm_dma_ops = {
.sync_single_for_device = arm_dma_sync_single_for_device,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
.set_dma_mask = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
@@ -209,7 +208,6 @@ struct dma_map_ops arm_coherent_dma_ops = {
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
.set_dma_mask = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
@@ -1143,16 +1141,6 @@ int dma_supported(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_supported);
int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
*dev->dma_mask = dma_mask;
return 0;
}
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
static int __init dma_debug_do_init(void)
@@ -2006,8 +1994,6 @@ struct dma_map_ops iommu_ops = {
.unmap_sg = arm_iommu_unmap_sg,
.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
.set_dma_mask = arm_dma_set_mask,
};
struct dma_map_ops iommu_coherent_ops = {
@@ -2021,8 +2007,6 @@ struct dma_map_ops iommu_coherent_ops = {
.map_sg = arm_coherent_iommu_map_sg,
.unmap_sg = arm_coherent_iommu_unmap_sg,
.set_dma_mask = arm_dma_set_mask,
};
/**
@@ -15,7 +15,7 @@
* page tables.
*/
pgd_t *idmap_pgd;
unsigned long (*arch_virt_to_idmap)(unsigned long x);
long long arch_phys_to_idmap_offset;
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
@@ -297,9 +297,10 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
}
/*
* Don't allow RAM to be mapped - this causes problems with ARMv6+
* Don't allow RAM to be mapped with mismatched attributes - this
* causes problems with ARMv6+
*/
if (WARN_ON(pfn_valid(pfn)))
if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
return NULL;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -380,11 +381,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
__alias(ioremap_cached);
void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
@@ -414,6 +419,13 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
__builtin_return_address(0));
}
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
return (__force void *)arch_ioremap_caller(phys_addr, size,
MT_MEMORY_RW,
__builtin_return_address(0));
}
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
@@ -368,11 +368,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
__alias(ioremap_cached);
void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
@@ -381,6 +385,11 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
}
EXPORT_SYMBOL(ioremap_wc);
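/*
 * Without an MMU, RAM is identity-mapped with its natural attributes,
 * so a writeback memremap() can simply return the physical address.
 */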
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
return (void *)phys_addr;
}
void __iounmap(volatile void __iomem *addr)
{
}
@@ -4,7 +4,10 @@
# Copyright (C) 2001 Russell King
#
include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
@$(kecho) ' Generating $@'
@mkdir -p $(dir $@)
$(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }
quiet_cmd_gen_mach = GEN $@
cmd_gen_mach = mkdir -p $(dir $@) && \
$(AWK) -f $(filter-out $(PHONY),$^) > $@ || \
{ rm -f $@; /bin/false; }
include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE
$(call if_changed,gen_mach)
@@ -336,16 +336,7 @@ static void amba_device_release(struct device *dev)
kfree(d);
}
/**
* amba_device_add - add a previously allocated AMBA device structure
* @dev: AMBA device allocated by amba_device_alloc
* @parent: resource parent for this device's resources
*
* Claim the resource, and read the device cell ID if not already
* initialized. Register the AMBA device with the Linux device
* manager.
*/
int amba_device_add(struct amba_device *dev, struct resource *parent)
static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
{
u32 size;
void __iomem *tmp;
@@ -373,6 +364,12 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
goto err_release;
}
ret = dev_pm_domain_attach(&dev->dev, true);
if (ret == -EPROBE_DEFER) {
iounmap(tmp);
goto err_release;
}
ret = amba_get_enable_pclk(dev);
if (ret == 0) {
u32 pid, cid;
@@ -398,6 +395,7 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
}
iounmap(tmp);
dev_pm_domain_detach(&dev->dev, true);
if (ret)
goto err_release;
@@ -421,6 +419,88 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
err_out:
return ret;
}
/*
* Registration of an AMBA device requires reading its pid and cid registers.
* To do this, the device must be turned on (if it is part of a power domain)
* and have its clocks enabled. However, in some cases those resources might
* not yet be available. Returning EPROBE_DEFER is not a solution in such
* cases, because callers don't handle this special error code. Instead, such
* devices are added to a special list and their registration is retried from
* a periodic worker until all resources are available and registration succeeds.
*/
struct deferred_device {
struct amba_device *dev;
struct resource *parent;
struct list_head node;
};
static LIST_HEAD(deferred_devices);
static DEFINE_MUTEX(deferred_devices_lock);
static void amba_deferred_retry_func(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func);
#define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000))
static void amba_deferred_retry_func(struct work_struct *dummy)
{
struct deferred_device *ddev, *tmp;
mutex_lock(&deferred_devices_lock);
list_for_each_entry_safe(ddev, tmp, &deferred_devices, node) {
int ret = amba_device_try_add(ddev->dev, ddev->parent);
if (ret == -EPROBE_DEFER)
continue;
list_del_init(&ddev->node);
kfree(ddev);
}
if (!list_empty(&deferred_devices))
schedule_delayed_work(&deferred_retry_work,
DEFERRED_DEVICE_TIMEOUT);
mutex_unlock(&deferred_devices_lock);
}
/**
* amba_device_add - add a previously allocated AMBA device structure
* @dev: AMBA device allocated by amba_device_alloc
* @parent: resource parent for this device's resources
*
* Claim the resource, and read the device cell ID if not already
* initialized. Register the AMBA device with the Linux device
* manager.
*/
int amba_device_add(struct amba_device *dev, struct resource *parent)
{
int ret = amba_device_try_add(dev, parent);
if (ret == -EPROBE_DEFER) {
struct deferred_device *ddev;
ddev = kmalloc(sizeof(*ddev), GFP_KERNEL);
if (!ddev)
return -ENOMEM;
ddev->dev = dev;
ddev->parent = parent;
ret = 0;
mutex_lock(&deferred_devices_lock);
if (list_empty(&deferred_devices))
schedule_delayed_work(&deferred_retry_work,
DEFERRED_DEVICE_TIMEOUT);
list_add_tail(&ddev->node, &deferred_devices);
mutex_unlock(&deferred_devices_lock);
}
return ret;
}
EXPORT_SYMBOL_GPL(amba_device_add);
static struct amba_device *
@@ -71,8 +71,8 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
info->map.name);
return -ENOMEM;
}
info->map.cached = memremap(info->map.phys, info->map.size,
MEMREMAP_WB);
info->map.cached =
ioremap_cached(info->map.phys, info->map.size);
if (!info->map.cached)
printk(KERN_WARNING "Failed to ioremap cached %s\n",
info->map.name);
@@ -111,7 +111,7 @@ static int pxa2xx_flash_remove(struct platform_device *dev)
map_destroy(info->mtd);
iounmap(info->map.virt);
if (info->map.cached)
memunmap(info->map.cached);
iounmap(info->map.cached);
kfree(info);
return 0;
}
@@ -27,6 +27,13 @@ __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
}
#endif
#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
return (__force void *)ioremap_cache(offset, size);
}
#endif
static void *try_ram_remap(resource_size_t offset, size_t size)
{
unsigned long pfn = PHYS_PFN(offset);
@@ -34,7 +41,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size)
/* In the simple case just return the existing linear address */
if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
return __va(offset);
return NULL; /* fallback to ioremap_cache */
return NULL; /* fallback to arch_memremap_wb */
}
/**
@@ -90,7 +97,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
if (is_ram == REGION_INTERSECTS)
addr = try_ram_remap(offset, size);
if (!addr)
addr = ioremap_cache(offset, size);
addr = arch_memremap_wb(offset, size);
}
/*
@@ -63,7 +63,6 @@ static unsigned int table_size, table_cnt;
static int all_symbols = 0;
static int absolute_percpu = 0;
static char symbol_prefix_char = '\0';
static unsigned long long kernel_start_addr = 0;
static int base_relative = 0;
int token_profit[0x10000];
@@ -223,15 +222,13 @@ static int symbol_valid(struct sym_entry *s)
static char *special_suffixes[] = {
"_veneer", /* arm */
"_from_arm", /* arm */
"_from_thumb", /* arm */
NULL };
int i;
char *sym_name = (char *)s->sym + 1;
if (s->addr < kernel_start_addr)
return 0;
/* skip prefix char */
if (symbol_prefix_char && *sym_name == symbol_prefix_char)
sym_name++;
@@ -765,9 +762,6 @@ int main(int argc, char **argv)
if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
p++;
symbol_prefix_char = *p;
} else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
const char *p = &argv[i][14];
kernel_start_addr = strtoull(p, NULL, 16);
} else if (strcmp(argv[i], "--base-relative") == 0)
base_relative = 1;
else
@@ -82,10 +82,6 @@ kallsyms()
kallsymopt="${kallsymopt} --all-symbols"
fi
if [ -n "${CONFIG_ARM}" ] && [ -z "${CONFIG_XIP_KERNEL}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
fi
if [ -n "${CONFIG_KALLSYMS_ABSOLUTE_PERCPU}" ]; then
kallsymopt="${kallsymopt} --absolute-percpu"
fi