Commit 33052057 authored by Trond Myklebust
......@@ -452,6 +452,11 @@ running once the system is up.
eata= [HW,SCSI]
ec_intr= [HW,ACPI] ACPI Embedded Controller interrupt mode
Format: <int>
0: polling mode
non-0: interrupt mode (default)
eda= [HW,PS2]
edb= [HW,PS2]
......
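The ec_intr= entry above selects between polled and interrupt-driven operation of the ACPI Embedded Controller. As a hedged sketch of how a boot parameter of this shape is typically consumed (the handler and variable names below are hypothetical, not the driver's own):

/* Hypothetical consumer of an "ec_intr=<int>" boot parameter. */
static int ec_use_intr = 1;		/* non-0: interrupt mode (default) */

static int __init ec_intr_setup(char *str)
{
	int val;

	if (get_option(&str, &val))	/* parse the integer after '=' */
		ec_use_intr = (val != 0);
	return 1;			/* parameter handled */
}
__setup("ec_intr=", ec_intr_setup);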
......@@ -837,8 +837,10 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
Module for AC'97 motherboards from Intel and compatibles.
* Intel i810/810E, i815, i820, i830, i84x, MX440
ICH5, ICH6, ICH7, ESB2
* SiS 7012 (SiS 735)
* NVidia NForce, NForce2
* NVidia NForce, NForce2, NForce3, MCP04, CK804
CK8, CK8S, MCP501
* AMD AMD768, AMD8111
* ALi m5455
......@@ -868,6 +870,12 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
--------------------
Module for Intel ICH (i8x0) chipset MC97 modems.
* Intel i810/810E, i815, i820, i830, i84x, MX440
ICH5, ICH6, ICH7
* SiS 7013 (SiS 735)
* NVidia NForce, NForce2, NForce2s, NForce3
* AMD AMD8111
* ALi m5455
ac97_clock - AC'97 codec clock base (0 = auto-detect)
......
......@@ -5206,14 +5206,14 @@ struct _snd_pcm_runtime {
You need to pass the <function>snd_dma_pci_data(pci)</function>,
where pci is the struct <structname>pci_dev</structname> pointer
of the chip as well.
The <type>snd_sg_buf_t</type> instance is created as
The <type>struct snd_sg_buf</type> instance is created as
substream-&gt;dma_private. You can cast
the pointer like:
<informalexample>
<programlisting>
<![CDATA[
struct snd_sg_buf *sgbuf = (struct snd_sg_buf_t*)substream->dma_private;
struct snd_sg_buf *sgbuf = (struct snd_sg_buf *)substream->dma_private;
]]>
</programlisting>
</informalexample>
......
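To make the documented cast concrete, here is a minimal hypothetical helper (not part of this patch) that fetches the SG buffer attached to a substream; dma_private is only meaningful when the stream was set up with SG-buffer allocation as the surrounding section describes:

/* Hypothetical driver helper around the cast shown above. */
static struct snd_sg_buf *chip_get_sgbuf(struct snd_pcm_substream *substream)
{
	/* valid only for SG-buffer allocations */
	return (struct snd_sg_buf *)substream->dma_private;
}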
......@@ -3,6 +3,6 @@ obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += cstate.o
obj-y += cstate.o processor.o
endif
......@@ -464,7 +464,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
* success: return IRQ number (>=0)
* failure: return < 0
*/
int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
unsigned int irq;
unsigned int plat_gsi = gsi;
......@@ -476,14 +476,14 @@ int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
extern void eisa_set_level_irq(unsigned int irq);
if (edge_level == ACPI_LEVEL_SENSITIVE)
if (triggering == ACPI_LEVEL_SENSITIVE)
eisa_set_level_irq(gsi);
}
#endif
#ifdef CONFIG_X86_IO_APIC
if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
plat_gsi = mp_register_gsi(gsi, triggering, polarity);
}
#endif
acpi_gsi_to_irq(plat_gsi, &irq);
......
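An illustrative caller of the renamed interface (not taken from this patch; PCI interrupts are conventionally level-triggered and active-low):

int irq = acpi_register_gsi(gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
if (irq < 0)
	printk(KERN_WARNING "Failed to register GSI %u\n", gsi);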
......@@ -14,64 +14,6 @@
#include <acpi/processor.h>
#include <asm/acpi.h>
static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power
*pow)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pow->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void acpi_processor_power_init_pdc(struct acpi_processor_power *pow,
unsigned int cpu)
{
struct cpuinfo_x86 *c = cpu_data + cpu;
pow->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL)
acpi_processor_power_init_intel_pdc(pow);
return;
}
EXPORT_SYMBOL(acpi_processor_power_init_pdc);
/*
* Initialize bm_flags based on the CPU cache properties
* On SMP it depends on cache configuration
......
/*
* arch/i386/kernel/acpi/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
if (cpu_has(c, X86_FEATURE_EST))
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
struct cpuinfo_x86 *c = cpu_data + cpu;
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL)
init_intel_pdc(pr, c);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
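The arch code above only builds the three-dword _PDC buffer (revision, dword count, capability bits) and parks it in pr->pdc; per the comments elsewhere in this commit, the actual _PDC evaluation happens in drivers/acpi/processor.c. A hedged sketch of that hand-off (function name hypothetical, error handling trimmed):

static acpi_status acpi_processor_set_pdc(struct acpi_processor *pr)
{
	if (!pr->pdc)			/* arch had nothing to declare */
		return AE_OK;

	return acpi_evaluate_object(pr->handle, "_PDC", pr->pdc, NULL);
}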
......@@ -295,68 +295,6 @@ acpi_cpufreq_guess_freq (
}
/*
* acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities
* of this driver
* @perf: processor-specific acpi_io_data struct
* @cpu: CPU being initialized
*
* To avoid issues with legacy OSes, some BIOSes require to be informed of
* the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
* accordingly, for Enhanced Speedstep. Actual call to _PDC is done in
* driver/acpi/processor.c
*/
static void
acpi_processor_cpu_init_pdc_est(
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
union acpi_object *obj;
u32 *buf;
struct cpuinfo_x86 *c = cpu_data + cpu;
dprintk("acpi_processor_cpu_init_pdc_est\n");
if (!cpu_has(c, X86_FEATURE_EST))
return;
/* Initialize pdc. It will be used later. */
if (!obj_list)
return;
if (!(obj_list->count && obj_list->pointer))
return;
obj = obj_list->pointer;
if ((obj->buffer.length == 12) && obj->buffer.pointer) {
buf = (u32 *)obj->buffer.pointer;
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
perf->pdc = obj_list;
}
return;
}
/* CPU specific PDC initialization */
static void
acpi_processor_cpu_init_pdc(
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
struct cpuinfo_x86 *c = cpu_data + cpu;
dprintk("acpi_processor_cpu_init_pdc\n");
perf->pdc = NULL;
if (cpu_has(c, X86_FEATURE_EST))
acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list);
return;
}
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
......@@ -367,14 +305,7 @@ acpi_cpufreq_cpu_init (
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
dprintk("acpi_cpufreq_cpu_init\n");
/* setup arg_list for _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
......@@ -382,9 +313,7 @@ acpi_cpufreq_cpu_init (
acpi_io_data[cpu] = data;
acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list);
result = acpi_processor_register_performance(&data->acpi_data, cpu);
data->acpi_data.pdc = NULL;
if (result)
goto err_free;
......
......@@ -362,22 +362,10 @@ static struct acpi_processor_performance p;
*/
static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
{
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
unsigned long cur_freq;
int result = 0, i;
unsigned int cpu = policy->cpu;
/* _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
arg0_buf[0] = ACPI_PDC_REVISION_ID;
arg0_buf[1] = 1;
arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR;
p.pdc = &arg_list;
/* register with ACPI core */
if (acpi_processor_register_performance(&p, cpu)) {
dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
......
......@@ -1080,7 +1080,7 @@ void __init mp_config_acpi_legacy_irqs (void)
#define MAX_GSI_NUM 4096
int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
int mp_register_gsi (u32 gsi, int triggering, int polarity)
{
int ioapic = -1;
int ioapic_pin = 0;
......@@ -1129,7 +1129,7 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
if (edge_level) {
if (triggering == ACPI_LEVEL_SENSITIVE) {
/*
* For PCI devices assign IRQs in order, avoiding gaps
* due to unused I/O APIC pins.
......@@ -1151,8 +1151,8 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
}
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
return gsi;
}
......
......@@ -13,6 +13,11 @@ obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += acpi-processor.o
endif
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
......
......@@ -33,33 +33,33 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
struct acpi_resource_vendor *vendor;
struct acpi_vendor_descriptor *descriptor;
u32 length;
u32 byte_length;
if (resource->id != ACPI_RSTYPE_VENDOR)
if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
return AE_OK;
vendor = (struct acpi_resource_vendor *)&resource->data;
descriptor = (struct acpi_vendor_descriptor *)vendor->reserved;
if (vendor->length <= sizeof(*info->descriptor) ||
descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
if (vendor->byte_length <= sizeof(*info->descriptor) ||
descriptor->guid_id != info->descriptor->guid_id ||
efi_guidcmp(descriptor->guid, info->descriptor->guid))
return AE_OK;
length = vendor->length - sizeof(struct acpi_vendor_descriptor);
info->data = acpi_os_allocate(length);
byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
info->data = acpi_os_allocate(byte_length);
if (!info->data)
return AE_NO_MEMORY;
memcpy(info->data,
vendor->reserved + sizeof(struct acpi_vendor_descriptor),
length);
info->length = length;
vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
byte_length);
info->length = byte_length;
return AE_CTRL_TERMINATE;
}
acpi_status
acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
u8 ** data, u32 * length)
u8 ** data, u32 * byte_length)
{
struct acpi_vendor_info info;
......@@ -72,7 +72,7 @@ acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
return AE_NOT_FOUND;
*data = info.data;
*length = info.length;
*byte_length = info.length;
return AE_OK;
}
......
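A hypothetical caller of acpi_find_vendor_resource() as reworked above ('handle' and 'my_descriptor' are placeholders); note the returned buffer comes from acpi_os_allocate() and is the caller's to free:

u8 *data;
u32 byte_length;

if (ACPI_SUCCESS(acpi_find_vendor_resource(handle, &my_descriptor,
					    &data, &byte_length))) {
	/* ... consume byte_length bytes at data ... */
	acpi_os_free(data);
}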
/*
 * arch/ia64/kernel/acpi-processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
pr->pdc = NULL;
init_intel_pdc(pr);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
......@@ -567,16 +567,16 @@ void __init acpi_numa_arch_fixup(void)
* success: return IRQ number (>=0)
* failure: return < 0
*/
int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
if (has_8259 && gsi < 16)
return isa_irq_to_vector(gsi);
return iosapic_register_intr(gsi,
(active_high_low ==
(polarity ==
ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
IOSAPIC_POL_LOW,
(edge_level ==
(triggering ==
ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
IOSAPIC_LEVEL);
}
......
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o
......@@ -269,48 +269,6 @@ acpi_cpufreq_verify (
}
/*
* processor_init_pdc - let BIOS know about the SMP capabilities
* of this driver
* @perf: processor-specific acpi_io_data struct
* @cpu: CPU being initialized
*
* To avoid issues with legacy OSes, some BIOSes require to be informed of
* the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
* accordingly. Actual call to _PDC is done in driver/acpi/processor.c
*/
static void
processor_init_pdc (
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
union acpi_object *obj;
u32 *buf;
dprintk("processor_init_pdc\n");
perf->pdc = NULL;
/* Initialize pdc. It will be used later. */
if (!obj_list)
return;
if (!(obj_list->count && obj_list->pointer))
return;
obj = obj_list->pointer;
if ((obj->buffer.length == 12) && obj->buffer.pointer) {
buf = (u32 *)obj->buffer.pointer;
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
perf->pdc = obj_list;
}
return;
}
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
......@@ -320,14 +278,7 @@ acpi_cpufreq_cpu_init (
struct cpufreq_acpi_io *data;
unsigned int result = 0;
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
dprintk("acpi_cpufreq_cpu_init\n");
/* setup arg_list for _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
......@@ -337,9 +288,7 @@ acpi_cpufreq_cpu_init (
acpi_io_data[cpu] = data;
processor_init_pdc(&data->acpi_data, cpu, &arg_list);
result = acpi_processor_register_performance(&data->acpi_data, cpu);
data->acpi_data.pdc = NULL;
if (result)
goto err_free;
......
......@@ -193,12 +193,12 @@ add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
goto free_resource;
}
min = addr->min_address_range;
min = addr->minimum;
max = min + addr->address_length - 1;
if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
sparse = 1;
space_nr = new_space(addr->address_translation_offset, sparse);
space_nr = new_space(addr->translation_offset, sparse);
if (space_nr == ~0)
goto free_name;
......@@ -285,7 +285,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
if (addr.resource_type == ACPI_MEMORY_RANGE) {
flags = IORESOURCE_MEM;
root = &iomem_resource;
offset = addr.address_translation_offset;
offset = addr.translation_offset;
} else if (addr.resource_type == ACPI_IO_RANGE) {
flags = IORESOURCE_IO;
root = &ioport_resource;
......@@ -298,7 +298,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
window = &info->controller->window[info->controller->windows++];
window->resource.name = info->name;
window->resource.flags = flags;
window->resource.start = addr.min_address_range + offset;
window->resource.start = addr.minimum + offset;
window->resource.end = window->resource.start + addr.address_length - 1;
window->resource.child = NULL;
window->offset = offset;
......
......@@ -58,6 +58,7 @@ pcibios_find_pci_bus(struct device_node *dn)
return find_bus_among_children(pdn->phb->bus, dn);
}
EXPORT_SYMBOL_GPL(pcibios_find_pci_bus);
/**
* pcibios_remove_pci_devices - remove all devices under this bus
......@@ -106,6 +107,7 @@ pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
}
}
}
EXPORT_SYMBOL_GPL(pcibios_fixup_new_pci_devices);
static int
pcibios_pci_config_bridge(struct pci_dev *dev)
......@@ -172,3 +174,4 @@ pcibios_add_pci_devices(struct pci_bus * bus)
pcibios_pci_config_bridge(dev);
}
}
EXPORT_SYMBOL_GPL(pcibios_add_pci_devices);
obj-y := boot.o
boot-y := ../../../i386/kernel/acpi/boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += processor.o
endif
/*
* arch/x86_64/kernel/acpi/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
struct cpuinfo_x86 *c = cpu_data + cpu;
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
init_intel_pdc(pr, c);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
......@@ -2027,7 +2027,7 @@ int __init io_apic_get_redir_entries (int ioapic)
}
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
struct IO_APIC_route_entry entry;
unsigned long flags;
......@@ -2049,8 +2049,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
entry.delivery_mode = INT_DELIVERY_MODE;
entry.dest_mode = INT_DEST_MODE;
entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
entry.trigger = edge_level;
entry.polarity = active_high_low;
entry.trigger = triggering;
entry.polarity = polarity;
entry.mask = 1; /* Disabled (masked) */
irq = gsi_irq_sharing(irq);
......@@ -2065,9 +2065,9 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
"IRQ %d Mode:%i Active:%i)\n", ioapic,
mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
edge_level, active_high_low);
triggering, polarity);
ioapic_register_intr(irq, entry.vector, edge_level);
ioapic_register_intr(irq, entry.vector, triggering);
if (!ioapic && (irq < 16))
disable_8259A_irq(irq);
......
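The 0/1 encodings passed to io_apic_set_pci_routing() in the hunk above, restated as hypothetical helpers purely for clarity:

static inline int ioapic_trigger(int triggering)
{
	return triggering == ACPI_EDGE_SENSITIVE ? 0 : 1;	/* 0 = edge */
}

static inline int ioapic_polarity(int polarity)
{
	return polarity == ACPI_ACTIVE_HIGH ? 0 : 1;		/* 0 = active-high */
}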
......@@ -915,7 +915,7 @@ void __init mp_config_acpi_legacy_irqs (void)
#define MAX_GSI_NUM 4096
int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
int ioapic = -1;
int ioapic_pin = 0;
......@@ -964,7 +964,7 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
if (edge_level) {
if (triggering == ACPI_LEVEL_SENSITIVE) {
/*
* For PCI devices assign IRQs in order, avoiding gaps
* due to unused I/O APIC pins.
......@@ -986,8 +986,8 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
}
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
return gsi;
}
......
......@@ -267,7 +267,6 @@ config ACPI_DEBUG
config ACPI_EC
bool
depends on X86
default y
help
This driver is required on some systems for the proper operation of
......
......@@ -71,8 +71,8 @@ static struct acpi_driver acpi_memory_device_driver = {
struct acpi_memory_device {
acpi_handle handle;
unsigned int state; /* State of the memory device */
unsigned short cache_attribute; /* memory cache attribute */
unsigned short read_write_attribute; /* memory read/write attribute */
unsigned short caching; /* memory cache attribute */
unsigned short write_protect; /* memory read/write attribute */
u64 start_addr; /* Memory Range start physical addr */
u64 end_addr; /* Memory Range end physical addr */
};
......@@ -97,12 +97,12 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
if (ACPI_SUCCESS(status)) {
if (address64.resource_type == ACPI_MEMORY_RANGE) {
/* Populate the structure */
mem_device->cache_attribute =
address64.attribute.memory.cache_attribute;
mem_device->read_write_attribute =
address64.attribute.memory.read_write_attribute;
mem_device->start_addr = address64.min_address_range;
mem_device->end_addr = address64.max_address_range;
mem_device->caching =
address64.info.mem.caching;
mem_device->write_protect =
address64.info.mem.write_protect;
mem_device->start_addr = address64.minimum;
mem_device->end_addr = address64.maximum;
}
}
......@@ -250,7 +250,6 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
int result;
u64 start = mem_device->start_addr;
u64 len = mem_device->end_addr - start + 1;
unsigned long attr = mem_device->read_write_attribute;
ACPI_FUNCTION_TRACE("acpi_memory_disable_device");
......
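A minimal sketch of reading the renamed address64 fields, mirroring the hunk above; the surrounding _CRS walk callback is omitted and 'resource' is assumed to be the current struct acpi_resource:

struct acpi_resource_address64 address64;
acpi_status status = acpi_resource_to_address64(resource, &address64);

if (ACPI_SUCCESS(status) && address64.resource_type == ACPI_MEMORY_RANGE) {
	u64 start = address64.minimum;
	u64 end = address64.maximum;
	unsigned short caching = address64.info.mem.caching;
	/* ... */
}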
......@@ -78,9 +78,9 @@ MODULE_LICENSE("GPL");
static uid_t asus_uid;
static gid_t asus_gid;
module_param(asus_uid, uint, 0);
MODULE_PARM_DESC(uid, "UID for entries in /proc/acpi/asus.\n");
MODULE_PARM_DESC(asus_uid, "UID for entries in /proc/acpi/asus.\n");
module_param(asus_gid, uint, 0);
MODULE_PARM_DESC(gid, "GID for entries in /proc/acpi/asus.\n");
MODULE_PARM_DESC(asus_gid, "GID for entries in /proc/acpi/asus.\n");
/* For each model, all features implemented,
* those marked with R are relative to HOTK, A for absolute */
......@@ -302,7 +302,7 @@ static struct model_data model_conf[END_MODEL] = {
.brightness_set = "SPLV",
.brightness_get = "GPLV",
.display_set = "SDSP",
.display_get = "\\SSTE"},
.display_get = "\\_SB.PCI0.P0P1.VGA.GETD"},
{
.name = "M6R",
.mt_mled = "MLED",
......@@ -851,6 +851,8 @@ static int __init asus_hotk_add_fs(struct acpi_device *device)
mode = S_IFREG | S_IRUGO | S_IWUGO;
} else {
mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
printk(KERN_WARNING " asus_uid and asus_gid parameters are "
"deprecated, use chown and chmod instead!\n");
}
acpi_device_dir(device) = asus_proc_dir;
......@@ -987,9 +989,21 @@ static int __init asus_hotk_get_info(void)
printk(KERN_NOTICE " BSTS called, 0x%02x returned\n",
bsts_result);
/* Samsung P30 has a device with a valid _HID whose INIT does not
* return anything. Catch this one and any similar here */
if (buffer.pointer == NULL) {
/* This is unlikely with implicit return */
if (buffer.pointer == NULL)
return -EINVAL;
model = (union acpi_object *) buffer.pointer;
/*
* Samsung P30 has a device with a valid _HID whose INIT does not
* return anything. It used to be possible to catch this exception,
* but the implicit return code will now happily confuse the
* driver. We assume that every ACPI_TYPE_STRING is a valid model
* identifier but it's still possible to get completely bogus data.
*/
if (model->type == ACPI_TYPE_STRING) {
printk(KERN_NOTICE " %s model detected, ", model->string.pointer);
} else {
if (asus_info && /* Samsung P30 */
strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) {
hotk->model = P30;
......@@ -1002,13 +1016,10 @@ static int __init asus_hotk_get_info(void)
"the developers with your DSDT\n");
}
hotk->methods = &model_conf[hotk->model];
return AE_OK;
}
acpi_os_free(model);
model = (union acpi_object *)buffer.pointer;
if (model->type == ACPI_TYPE_STRING) {
printk(KERN_NOTICE " %s model detected, ",
model->string.pointer);
return AE_OK;
}
hotk->model = END_MODEL;
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -128,7 +128,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags, walk_state,
&(node));
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.string, status);
ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
return_ACPI_STATUS(status);
}
}
......@@ -232,7 +232,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
+ (acpi_integer) arg->common.value.size;
if (position > ACPI_UINT32_MAX) {
ACPI_REPORT_ERROR(("Bit offset within field too large (> 0xFFFFFFFF)\n"));
ACPI_ERROR((AE_INFO,
"Bit offset within field too large (> 0xFFFFFFFF)"));
return_ACPI_STATUS(AE_SUPPORT);
}
......@@ -268,8 +269,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
ACPI_NS_DONT_OPEN_SCOPE,
walk_state, &info->field_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR((char *)&arg->named.name,
status);
ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
status);
if (status != AE_ALREADY_EXISTS) {
return_ACPI_STATUS(status);
}
......@@ -293,7 +294,11 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
+ (acpi_integer) arg->common.value.size;
if (position > ACPI_UINT32_MAX) {
ACPI_REPORT_ERROR(("Field [%4.4s] bit offset too large (> 0xFFFFFFFF)\n", (char *)&info->field_node->name));
ACPI_ERROR((AE_INFO,
"Field [%4.4s] bit offset too large (> 0xFFFFFFFF)",
ACPI_CAST_PTR(char,
&info->field_node->
name)));
return_ACPI_STATUS(AE_SUPPORT);
}
......@@ -302,9 +307,9 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Invalid opcode in field list: %X\n",
arg->common.aml_opcode));
ACPI_ERROR((AE_INFO,
"Invalid opcode in field list: %X",
arg->common.aml_opcode));
return_ACPI_STATUS(AE_AML_BAD_OPCODE);
}
......@@ -349,7 +354,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
walk_state, &region_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.name, status);
ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
......@@ -431,8 +436,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
ACPI_NS_ERROR_IF_FOUND,
walk_state, &node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR((char *)&arg->named.name,
status);
ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
status);
if (status != AE_ALREADY_EXISTS) {
return_ACPI_STATUS(status);
}
......@@ -488,7 +493,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
walk_state, &region_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.name, status);
ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
......@@ -502,7 +507,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.string, status);
ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
return_ACPI_STATUS(status);
}
......@@ -560,7 +565,7 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.string, status);
ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
return_ACPI_STATUS(status);
}
......@@ -573,7 +578,7 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.data_register_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.string, status);
ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
return_ACPI_STATUS(status);
}
......
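The conversion pattern used throughout this file: old ACPI_REPORT_NSERROR/ACPI_DEBUG_PRINT error paths become the new macros, which take a double-parenthesized printf-style list so the macro can swallow variable arguments, with AE_INFO supplying the module name and line number. Examples drawn from calls in this commit ('aml_opcode', 'pathname', and 'obj_handle' stand in for the real arguments):

ACPI_ERROR((AE_INFO, "Invalid opcode in field list: %X", aml_opcode));
ACPI_ERROR_NAMESPACE(pathname, status);
ACPI_EXCEPTION((AE_INFO, status, "During Region initialization %p", obj_handle));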
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -84,7 +84,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
acpi_object_type type;
acpi_status status;
ACPI_FUNCTION_NAME("ds_init_one_object");
ACPI_FUNCTION_ENTRY();
/*
* We are only interested in NS nodes owned by the table that
......@@ -105,11 +105,10 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
status = acpi_ds_initialize_region(obj_handle);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Region %p [%4.4s] - Init failure, %s\n",
obj_handle,
acpi_ut_get_node_name(obj_handle),
acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"During Region initialization %p [%4.4s]",
obj_handle,
acpi_ut_get_node_name(obj_handle)));
}
info->op_region_count++;
......@@ -117,14 +116,6 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
case ACPI_TYPE_METHOD:
/*
* Print a dot for each method unless we are going to print
* the entire pathname
*/
if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
}
/*
* Set the execution data width (32 or 64) based upon the
* revision number of the parent ACPI table.
......@@ -134,6 +125,21 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
if (info->table_desc->pointer->revision == 1) {
node->flags |= ANOBJ_DATA_WIDTH_32;
}
#ifdef ACPI_INIT_PARSE_METHODS
/*
* Note 11/2005: Removed this code to parse all methods during table
* load because it causes problems if there are any errors during the
* parse. Also, it seems like overkill and we probably don't want to
* abort a table load because of an issue with a single method.
*/
/*
* Print a dot for each method unless we are going to print
* the entire pathname
*/
if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
}
/*
* Always parse methods to detect errors, we will delete
......@@ -141,15 +147,15 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
*/
status = acpi_ds_parse_method(obj_handle);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"\n+Method %p [%4.4s] - parse failure, %s\n",
obj_handle,
acpi_ut_get_node_name(obj_handle),
acpi_format_exception(status)));
ACPI_ERROR((AE_INFO,
"Method %p [%4.4s] - parse failure, %s",
obj_handle,
acpi_ut_get_node_name(obj_handle),
acpi_format_exception(status)));
/* This parse failed, but we will continue parsing more methods */
}
#endif
info->method_count++;
break;
......@@ -207,8 +213,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
status = acpi_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
acpi_ds_init_one_object, &info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "walk_namespace failed, %s\n",
acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -47,135 +47,66 @@
#include <acpi/acdispat.h>
#include <acpi/acinterp.h>
#include <acpi/acnamesp.h>
#include <acpi/acdisasm.h>
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")
/*******************************************************************************
*
* FUNCTION: acpi_ds_parse_method
* FUNCTION: acpi_ds_method_error
*
* PARAMETERS: Node - Method node
* PARAMETERS: Status - Execution status
* walk_state - Current state
*
* RETURN: Status
*
* DESCRIPTION: Parse the AML that is associated with the method.
* DESCRIPTION: Called on method error. Invoke the global exception handler if
* present, dump the method data if the disassembler is configured
*
* MUTEX: Assumes parser is locked
* Note: Allows the exception handler to change the status code
*
******************************************************************************/
acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
acpi_status status;
union acpi_operand_object *obj_desc;
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node);
ACPI_FUNCTION_ENTRY();
/* Parameter Validation */
/* Ignore AE_OK and control exception codes */
if (!node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
return (status);
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Parsing [%4.4s] **** named_obj=%p\n",
acpi_ut_get_node_name(node), node));
/* Extract the method object from the method Node */
/* Invoke the global exception handler */
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
return_ACPI_STATUS(AE_NULL_OBJECT);
}
if (acpi_gbl_exception_handler) {
/* Exit the interpreter, allow handler to execute methods */
/* Create a mutex for the method if there is a concurrency limit */
acpi_ex_exit_interpreter();
if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) &&
(!obj_desc->method.semaphore)) {
status = acpi_os_create_semaphore(obj_desc->method.concurrency,
obj_desc->method.concurrency,
&obj_desc->method.semaphore);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/*
* Allocate a new parser op to be the root of the parsed
* method tree
*/
op = acpi_ps_alloc_op(AML_METHOD_OP);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Init new op with the method name and pointer back to the Node */
acpi_ps_set_name(op, node->name.integer);
op->common.node = node;
/*
* Get a new owner_id for objects created by this method. Namespace
* objects (such as Operation Regions) can be created during the
* first pass parse.
*/
status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/* Create and initialize a new walk state */
walk_state =
acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL,
NULL);
if (!walk_state) {
status = AE_NO_MEMORY;
goto cleanup2;
/*
* Handler can map the exception code to anything it wants, including
* AE_OK, in which case the executing method will not be aborted.
*/
status = acpi_gbl_exception_handler(status,
walk_state->method_node ?
walk_state->method_node->
name.integer : 0,
walk_state->opcode,
walk_state->aml_offset,
NULL);
(void)acpi_ex_enter_interpreter();
}
status = acpi_ds_init_aml_walk(walk_state, op, node,
obj_desc->method.aml_start,
obj_desc->method.aml_length, NULL, 1);
#ifdef ACPI_DISASSEMBLER
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
goto cleanup2;
}
/* Display method locals/args if disassembler is present */
/*
* Parse the method, first pass
*
* The first pass load is where newly declared named objects are added into
* the namespace. Actual evaluation of the named objects (what would be
* called a "second pass") happens during the actual execution of the
* method so that operands to the named objects can take on dynamic
* run-time values.
*/
status = acpi_ps_parse_aml(walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup2;
acpi_dm_dump_method_info(status, walk_state, walk_state->op);
}
#endif
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
acpi_ut_get_node_name(node), node, op));
/*
* Delete the parse tree. We simply re-parse the method for every
* execution since there isn't much overhead (compared to keeping lots
* of parse trees around)
*/
acpi_ns_delete_namespace_subtree(node);
acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id);
cleanup2:
acpi_ut_release_owner_id(&obj_desc->method.owner_id);
cleanup:
acpi_ps_delete_parse_tree(op);
return_ACPI_STATUS(status);
return (status);
}
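acpi_ds_method_error() above drives the global hook acpi_gbl_exception_handler, installed via acpi_install_exception_handler(). A hedged sketch of a handler with the signature implied by the call site; the handler may remap the status, e.g. return AE_OK to let the method keep running:

static acpi_status my_exception_handler(acpi_status aml_status,
					acpi_name name, u16 opcode,
					u32 aml_offset, void *context)
{
	/* keep the original status: the method is still aborted */
	return aml_status;
}

/* during driver/OS init: */
acpi_install_exception_handler(my_exception_handler);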
/*******************************************************************************
......@@ -195,9 +126,9 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
******************************************************************************/
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
union acpi_operand_object *obj_desc,
struct acpi_namespace_node *calling_method_node)
acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
union acpi_operand_object * obj_desc,
struct acpi_namespace_node * calling_method_node)
{
acpi_status status = AE_OK;
......@@ -210,7 +141,8 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
/* Prevent wraparound of thread count */
if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
ACPI_REPORT_ERROR(("Method reached maximum reentrancy limit (255)\n"));
ACPI_ERROR((AE_INFO,
"Method reached maximum reentrancy limit (255)"));
return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
}
......@@ -539,22 +471,61 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
acpi_os_signal_semaphore(walk_state->method_desc->method.
semaphore, 1);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not signal method semaphore\n"));
ACPI_ERROR((AE_INFO,
"Could not signal method semaphore"));
/* Ignore error and continue cleanup */
}
}
/*
* There are no more threads executing this method. Perform
* additional cleanup.
*
* The method Node is stored in the walk state
*/
method_node = walk_state->method_node;
/* Lock namespace for possible update */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto exit;
}
/*
* Delete any namespace entries created immediately underneath
* the method
*/
if (method_node->child) {
acpi_ns_delete_namespace_subtree(method_node);
}
/*
* Delete any namespace entries created anywhere else within
* the namespace by the execution of this method
*/
acpi_ns_delete_namespace_by_owner(walk_state->method_desc->method.
owner_id);
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
/* Are there any other threads currently executing this method? */
if (walk_state->method_desc->method.thread_count) {
/*
* Additional threads. Do not release the owner_id in this case,
* we immediately reuse it for the next thread executing this method
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"*** Not deleting method namespace, there are still %d threads\n",
"*** Completed execution of one thread, %d threads remaining\n",
walk_state->method_desc->method.
thread_count));
} else { /* This is the last executing thread */
} else {
/* This is the only executing thread for this method */
/*
* Support to dynamically change a method from not_serialized to
* Serialized if it appears that the method is written foolishly and
* Serialized if it appears that the method is incorrectly written and
* does not support multiple thread execution. The best example of this
* is if such a method creates namespace objects and blocks. A second
* thread will fail with an AE_ALREADY_EXISTS exception
......@@ -570,34 +541,8 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
semaphore);
}
/*
* There are no more threads executing this method. Perform
* additional cleanup.
*
* The method Node is stored in the walk state
*/
method_node = walk_state->method_node;
/*
* Delete any namespace entries created immediately underneath
* the method
*/
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto exit;
}
if (method_node->child) {
acpi_ns_delete_namespace_subtree(method_node);
}
/* No more threads, we can free the owner_id */
/*
* Delete any namespace entries created anywhere else within
* the namespace
*/
acpi_ns_delete_namespace_by_owner(walk_state->method_desc->
method.owner_id);
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
acpi_ut_release_owner_id(&walk_state->method_desc->method.
owner_id);
}
......@@ -606,3 +551,140 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
(void)acpi_ut_release_mutex(ACPI_MTX_PARSER);
return_VOID;
}
#ifdef ACPI_INIT_PARSE_METHODS
/*
* Note 11/2005: Removed this code to parse all methods during table
* load because it causes problems if there are any errors during the
* parse. Also, it seems like overkill and we probably don't want to
* abort a table load because of an issue with a single method.
*/
/*******************************************************************************
*
* FUNCTION: acpi_ds_parse_method
*
* PARAMETERS: Node - Method node
*
* RETURN: Status
*
* DESCRIPTION: Parse the AML that is associated with the method.
*
* MUTEX: Assumes parser is locked
*
******************************************************************************/
acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
{
acpi_status status;
union acpi_operand_object *obj_desc;
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node);
/* Parameter Validation */
if (!node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Parsing [%4.4s] **** named_obj=%p\n",
acpi_ut_get_node_name(node), node));
/* Extract the method object from the method Node */
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
return_ACPI_STATUS(AE_NULL_OBJECT);
}
/* Create a mutex for the method if there is a concurrency limit */
if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) &&
(!obj_desc->method.semaphore)) {
status = acpi_os_create_semaphore(obj_desc->method.concurrency,
obj_desc->method.concurrency,
&obj_desc->method.semaphore);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/*
* Allocate a new parser op to be the root of the parsed
* method tree
*/
op = acpi_ps_alloc_op(AML_METHOD_OP);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Init new op with the method name and pointer back to the Node */
acpi_ps_set_name(op, node->name.integer);
op->common.node = node;
/*
* Get a new owner_id for objects created by this method. Namespace
* objects (such as Operation Regions) can be created during the
* first pass parse.
*/
status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/* Create and initialize a new walk state */
walk_state =
acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL,
NULL);
if (!walk_state) {
status = AE_NO_MEMORY;
goto cleanup2;
}
status = acpi_ds_init_aml_walk(walk_state, op, node,
obj_desc->method.aml_start,
obj_desc->method.aml_length, NULL, 1);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
goto cleanup2;
}
/*
* Parse the method, first pass
*
* The first pass load is where newly declared named objects are added into
* the namespace. Actual evaluation of the named objects (what would be
* called a "second pass") happens during the actual execution of the
* method so that operands to the named objects can take on dynamic
* run-time values.
*/
status = acpi_ps_parse_aml(walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup2;
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
acpi_ut_get_node_name(node), node, op));
/*
* Delete the parse tree. We simply re-parse the method for every
* execution since there isn't much overhead (compared to keeping lots
* of parse trees around)
*/
acpi_ns_delete_namespace_subtree(node);
acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id);
cleanup2:
acpi_ut_release_owner_id(&obj_desc->method.owner_id);
cleanup:
acpi_ps_delete_parse_tree(op);
return_ACPI_STATUS(status);
}
#endif
......@@ -5,7 +5,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -260,9 +260,9 @@ acpi_ds_method_data_get_node(u16 opcode,
case AML_LOCAL_OP:
if (index > ACPI_METHOD_MAX_LOCAL) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Local index %d is invalid (max %d)\n",
index, ACPI_METHOD_MAX_LOCAL));
ACPI_ERROR((AE_INFO,
"Local index %d is invalid (max %d)",
index, ACPI_METHOD_MAX_LOCAL));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
......@@ -274,9 +274,9 @@ acpi_ds_method_data_get_node(u16 opcode,
case AML_ARG_OP:
if (index > ACPI_METHOD_MAX_ARG) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Arg index %d is invalid (max %d)\n",
index, ACPI_METHOD_MAX_ARG));
ACPI_ERROR((AE_INFO,
"Arg index %d is invalid (max %d)",
index, ACPI_METHOD_MAX_ARG));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
......@@ -286,8 +286,7 @@ acpi_ds_method_data_get_node(u16 opcode,
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Opcode %d is invalid\n",
opcode));
ACPI_ERROR((AE_INFO, "Opcode %d is invalid", opcode));
return_ACPI_STATUS(AE_AML_BAD_OPCODE);
}
......@@ -378,8 +377,7 @@ acpi_ds_method_data_get_value(u16 opcode,
/* Validate the object descriptor */
if (!dest_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null object descriptor pointer\n"));
ACPI_ERROR((AE_INFO, "Null object descriptor pointer"));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
......@@ -424,23 +422,24 @@ acpi_ds_method_data_get_value(u16 opcode,
switch (opcode) {
case AML_ARG_OP:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Uninitialized Arg[%d] at node %p\n",
index, node));
ACPI_ERROR((AE_INFO,
"Uninitialized Arg[%d] at node %p",
index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
case AML_LOCAL_OP:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Uninitialized Local[%d] at node %p\n",
index, node));
ACPI_ERROR((AE_INFO,
"Uninitialized Local[%d] at node %p",
index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL);
default:
ACPI_REPORT_ERROR(("Not Arg/Local opcode: %X\n",
opcode));
ACPI_ERROR((AE_INFO,
"Not a Arg/Local opcode: %X",
opcode));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
}
......
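For reference on the bounds checks in the hunk above:

/* AML provides Local0..Local7 and Arg0..Arg6, so ACPI_METHOD_MAX_LOCAL == 7
 * and ACPI_METHOD_MAX_ARG == 6; larger indexes fail with
 * AE_AML_INVALID_INDEX as shown. */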
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -51,6 +51,7 @@
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsobject")
/* Local prototypes */
static acpi_status
acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
......@@ -85,7 +86,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
*obj_desc_ptr = NULL;
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
/*
* This is an named object reference. If this name was
* This is a named object reference. If this name was
* previously looked up in the namespace, it was stored in this op.
* Otherwise, go ahead and look it up now
*/
......@@ -96,18 +97,48 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT |
ACPI_NS_DONT_OPEN_SCOPE, NULL,
(struct acpi_namespace_node **)
&(op->common.node));
ACPI_CAST_INDIRECT_PTR(struct
acpi_namespace_node,
&(op->
common.
node)));
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(op->common.value.string,
status);
/* Check if we are resolving a named reference within a package */
if ((status == AE_NOT_FOUND)
&& (acpi_gbl_enable_interpreter_slack)
&&
((op->common.parent->common.aml_opcode ==
AML_PACKAGE_OP)
|| (op->common.parent->common.aml_opcode ==
AML_VAR_PACKAGE_OP))) {
/*
* We didn't find the target and we are populating elements
* of a package - ignore if slack enabled. Some ASL code
* contains dangling invalid references in packages and
* expects that no exception will be issued. Leave the
* element as a null element. It cannot be used, but it
* can be overwritten by subsequent ASL code - this is
* typically the case.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Ignoring unresolved reference in package [%4.4s]\n",
walk_state->
scope_info->scope.
node->name.ascii));
return_ACPI_STATUS(AE_OK);
} else {
ACPI_ERROR_NAMESPACE(op->common.value.
string, status);
}
return_ACPI_STATUS(status);
}
}
}
/* Create and init the internal ACPI object */
/* Create and init a new internal ACPI object */
obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
(op->common.aml_opcode))->
......@@ -157,13 +188,13 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE("ds_build_internal_buffer_obj");
/*
* If we are evaluating a Named buffer object "Name (xxxx, Buffer)".
* The buffer object already exists (from the NS node), otherwise it must
* be created.
*/
obj_desc = *obj_desc_ptr;
if (obj_desc) {
/*
* We are evaluating a Named buffer object "Name (xxxx, Buffer)".
* The buffer object already exists (from the NS node)
*/
} else {
if (!obj_desc) {
/* Create a new buffer object */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
......@@ -183,10 +214,9 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
byte_list = arg->named.next;
if (byte_list) {
if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Expecting bytelist, got AML opcode %X in op %p\n",
byte_list->common.aml_opcode,
byte_list));
ACPI_ERROR((AE_INFO,
"Expecting bytelist, got AML opcode %X in op %p",
byte_list->common.aml_opcode, byte_list));
acpi_ut_remove_reference(obj_desc);
return (AE_TYPE);
......@@ -259,7 +289,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc = NULL;
u32 package_list_length;
acpi_status status = AE_OK;
u32 i;
acpi_native_uint i;
ACPI_FUNCTION_TRACE("ds_build_internal_package_obj");
......@@ -271,13 +301,12 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
parent = parent->common.parent;
}
/*
* If we are evaluating a Named package object "Name (xxxx, Package)",
* the package object already exists, otherwise it must be created.
*/
obj_desc = *obj_desc_ptr;
if (obj_desc) {
/*
* We are evaluating a Named package object "Name (xxxx, Package)".
* Get the existing package object from the NS node
*/
} else {
if (!obj_desc) {
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
*obj_desc_ptr = obj_desc;
if (!obj_desc) {
......@@ -291,11 +320,9 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
/* Count the number of items in the package list */
package_list_length = 0;
arg = op->common.value.arg;
arg = arg->common.next;
while (arg) {
package_list_length++;
for (package_list_length = 0; arg; package_list_length++) {
arg = arg->common.next;
}
......@@ -322,12 +349,11 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
}
/*
* Now init the elements of the package
* Initialize all elements of the package
*/
i = 0;
arg = op->common.value.arg;
arg = arg->common.next;
while (arg) {
for (i = 0; arg; i++) {
if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
/* Object (package or buffer) is already built */
......@@ -340,8 +366,6 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
package.
elements[i]);
}
i++;
arg = arg->common.next;
}
......@@ -518,9 +542,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown constant opcode %X\n",
opcode));
ACPI_ERROR((AE_INFO,
"Unknown constant opcode %X",
opcode));
status = AE_AML_OPERAND_TYPE;
break;
}
......@@ -535,9 +559,8 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown Integer type %X\n",
op_info->type));
ACPI_ERROR((AE_INFO, "Unknown Integer type %X",
op_info->type));
status = AE_AML_OPERAND_TYPE;
break;
}
......@@ -615,9 +638,8 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unimplemented data type: %X\n",
ACPI_GET_OBJECT_TYPE(obj_desc)));
ACPI_ERROR((AE_INFO, "Unimplemented data type: %X",
ACPI_GET_OBJECT_TYPE(obj_desc)));
status = AE_AML_OPERAND_TYPE;
break;
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -245,7 +245,9 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->buffer.node;
if (!node) {
ACPI_REPORT_ERROR(("No pointer back to NS node in buffer obj %p\n", obj_desc));
ACPI_ERROR((AE_INFO,
"No pointer back to NS node in buffer obj %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
......@@ -287,8 +289,9 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->package.node;
if (!node) {
ACPI_REPORT_ERROR(("No pointer back to NS node in package %p\n",
obj_desc));
ACPI_ERROR((AE_INFO,
"No pointer back to NS node in package %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
......@@ -413,9 +416,9 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Host object must be a Buffer */
if (ACPI_GET_OBJECT_TYPE(buffer_desc) != ACPI_TYPE_BUFFER) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Target of Create Field is not a Buffer object - %s\n",
acpi_ut_get_object_type_name(buffer_desc)));
ACPI_ERROR((AE_INFO,
"Target of Create Field is not a Buffer object - %s",
acpi_ut_get_object_type_name(buffer_desc)));
status = AE_AML_OPERAND_TYPE;
goto cleanup;
......@@ -427,10 +430,10 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
* after resolution in acpi_ex_resolve_operands().
*/
if (ACPI_GET_DESCRIPTOR_TYPE(result_desc) != ACPI_DESC_TYPE_NAMED) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"(%s) destination not a NS Node [%s]\n",
acpi_ps_get_opcode_name(aml_opcode),
acpi_ut_get_descriptor_name(result_desc)));
ACPI_ERROR((AE_INFO,
"(%s) destination not a NS Node [%s]",
acpi_ps_get_opcode_name(aml_opcode),
acpi_ut_get_descriptor_name(result_desc)));
status = AE_AML_OPERAND_TYPE;
goto cleanup;
......@@ -453,8 +456,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Must have a valid (>0) bit count */
if (bit_count == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Attempt to create_field of length 0\n"));
ACPI_ERROR((AE_INFO,
"Attempt to create_field of length zero"));
status = AE_AML_OPERAND_VALUE;
goto cleanup;
}
......@@ -507,9 +510,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown field creation opcode %02x\n",
aml_opcode));
ACPI_ERROR((AE_INFO,
"Unknown field creation opcode %02x", aml_opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
......@@ -517,13 +519,12 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Entire field must fit within the current length of the buffer */
if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field [%4.4s] size %d exceeds Buffer [%4.4s] size %d (bits)\n",
acpi_ut_get_node_name(result_desc),
bit_offset + bit_count,
acpi_ut_get_node_name(buffer_desc->buffer.
node),
8 * (u32) buffer_desc->buffer.length));
ACPI_ERROR((AE_INFO,
"Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)",
acpi_ut_get_node_name(result_desc),
bit_offset + bit_count,
acpi_ut_get_node_name(buffer_desc->buffer.node),
8 * (u32) buffer_desc->buffer.length));
status = AE_AML_BUFFER_LIMIT;
goto cleanup;
}
......@@ -629,9 +630,9 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
"after acpi_ex_resolve_operands");
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "(%s) bad operand(s) (%X)\n",
acpi_ps_get_opcode_name(op->common.
aml_opcode), status));
ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
acpi_ps_get_opcode_name(op->common.aml_opcode),
status));
return_ACPI_STATUS(status);
}
......@@ -1155,9 +1156,8 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown control opcode=%X Op=%p\n",
op->common.aml_opcode, op));
ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p",
op->common.aml_opcode, op));
status = AE_AML_BAD_OPCODE;
break;
......
......@@ -5,7 +5,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -176,8 +176,8 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
/* Must have both an Op and a Result Object */
if (!op) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Null Op\n"));
return_VALUE(TRUE);
ACPI_ERROR((AE_INFO, "Null Op"));
return_UINT8(TRUE);
}
/*
......@@ -208,7 +208,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
"At Method level, result of [%s] not used\n",
acpi_ps_get_opcode_name(op->common.
aml_opcode)));
return_VALUE(FALSE);
return_UINT8(FALSE);
}
/* Get info on the parent. The root_op is AML_SCOPE */
......@@ -216,9 +216,8 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
parent_info =
acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
if (parent_info->class == AML_CLASS_UNKNOWN) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown parent opcode. Op=%p\n", op));
return_VALUE(FALSE);
ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
return_UINT8(FALSE);
}
/*
......@@ -304,7 +303,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
return_VALUE(TRUE);
return_UINT8(TRUE);
result_not_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
......@@ -313,7 +312,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
return_VALUE(FALSE);
return_UINT8(FALSE);
}
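The return_VALUE() calls in this function become return_UINT8(), and later hunks substitute return_UINT32(). A sketch of the pattern, assuming these are typed variants of ACPICA's traced-return macros that log the exit value when function tracing is compiled in (the function name below is hypothetical):

	static u8 example_result_check(void)
	{
		ACPI_FUNCTION_TRACE("example_result_check");

		/* Typed traced return; previously written as return_VALUE(TRUE) */
		return_UINT8(TRUE);
	}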
/*******************************************************************************
......@@ -344,7 +343,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
ACPI_FUNCTION_TRACE_PTR("ds_delete_result_if_not_used", result_obj);
if (!op) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Null Op\n"));
ACPI_ERROR((AE_INFO, "Null Op"));
return_VOID;
}
......@@ -567,7 +566,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(name_string, status);
ACPI_ERROR_NAMESPACE(name_string, status);
}
}
......@@ -616,7 +615,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
if (op_info->flags & AML_HAS_RETVAL) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Argument previously created, already stacked \n"));
"Argument previously created, already stacked\n"));
ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
(walk_state->
......@@ -635,10 +634,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
* Only error is underflow, and this indicates
* a missing or null operand!
*/
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Missing or null operand, %s\n",
acpi_format_exception
(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Missing or null operand"));
return_ACPI_STATUS(status);
}
} else {
......@@ -730,7 +727,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
*/
(void)acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "While creating Arg %d - %s\n",
(arg_count + 1), acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d",
(arg_count + 1)));
return_ACPI_STATUS(status);
}
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -100,9 +100,8 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (result_obj) {
status = acpi_ds_result_pop(&obj_desc, walk_state);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not get result from predicate evaluation, %s\n",
acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Could not get result from predicate evaluation"));
return_ACPI_STATUS(status);
}
......@@ -123,9 +122,9 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
}
if (!obj_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No predicate obj_desc=%p State=%p\n",
obj_desc, walk_state));
ACPI_ERROR((AE_INFO,
"No predicate obj_desc=%p State=%p",
obj_desc, walk_state));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
......@@ -140,10 +139,10 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
}
if (ACPI_GET_OBJECT_TYPE(local_obj_desc) != ACPI_TYPE_INTEGER) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Bad predicate (not an integer) obj_desc=%p State=%p Type=%X\n",
obj_desc, walk_state,
ACPI_GET_OBJECT_TYPE(obj_desc)));
ACPI_ERROR((AE_INFO,
"Bad predicate (not an integer) obj_desc=%p State=%p Type=%X",
obj_desc, walk_state,
ACPI_GET_OBJECT_TYPE(obj_desc)));
status = AE_AML_OPERAND_TYPE;
goto cleanup;
......@@ -314,12 +313,13 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
case AML_CLASS_EXECUTE:
case AML_CLASS_CREATE:
/*
* Most operators with arguments.
* Start a new result/operand state
*/
status = acpi_ds_result_stack_push(walk_state);
if (walk_state->opcode != AML_CREATE_FIELD_OP) {
status = acpi_ds_result_stack_push(walk_state);
}
break;
default:
......@@ -361,8 +361,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
op_class = walk_state->op_info->class;
if (op_class == AML_CLASS_UNKNOWN) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown opcode %X\n",
op->common.aml_opcode));
ACPI_ERROR((AE_INFO, "Unknown opcode %X",
op->common.aml_opcode));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
}
......@@ -452,12 +452,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
walk_state->operands[1]->reference.offset)) {
status = AE_OK;
} else {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"[%s]: Could not resolve operands, %s\n",
acpi_ps_get_opcode_name
(walk_state->opcode),
acpi_format_exception
(status)));
ACPI_EXCEPTION((AE_INFO, status,
"While resolving operands for [%s]",
acpi_ps_get_opcode_name
(walk_state->opcode)));
}
}
......@@ -676,8 +674,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_UNDEFINED:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Undefined opcode type Op=%p\n", op));
ACPI_ERROR((AE_INFO,
"Undefined opcode type Op=%p", op));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
case AML_TYPE_BOGUS:
......@@ -689,10 +687,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p\n",
op_class, op_type,
op->common.aml_opcode, op));
ACPI_ERROR((AE_INFO,
"Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p",
op_class, op_type, op->common.aml_opcode,
op));
status = AE_NOT_IMPLEMENTED;
break;
......@@ -723,20 +721,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
cleanup:
/* Invoke exception handler on error */
if (ACPI_FAILURE(status) &&
acpi_gbl_exception_handler && !(status & AE_CODE_CONTROL)) {
acpi_ex_exit_interpreter();
status = acpi_gbl_exception_handler(status,
walk_state->method_node->
name.integer,
walk_state->opcode,
walk_state->aml_offset,
NULL);
(void)acpi_ex_enter_interpreter();
}
if (walk_state->result_obj) {
/* Break to debugger to display result */
......@@ -758,18 +742,14 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
}
#endif
/* Always clear the object stack */
walk_state->num_operands = 0;
#ifdef ACPI_DISASSEMBLER
/* On error, display method locals/args */
/* Invoke exception handler on error */
if (ACPI_FAILURE(status)) {
acpi_dm_dump_method_info(status, walk_state, op);
status = acpi_ds_method_error(status, walk_state);
}
#endif
/* Always clear the object stack */
walk_state->num_operands = 0;
return_ACPI_STATUS(status);
}
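With the open-coded exception-handler callout removed above, the cleanup tail of acpi_ds_exec_end_op() reduces to the sequence below. This is a sketch, assuming acpi_ds_method_error() now wraps both the acpi_gbl_exception_handler callout and the disassembler dump:

	cleanup:

		/* Invoke exception handler on error */

		if (ACPI_FAILURE(status)) {
			status = acpi_ds_method_error(status, walk_state);
		}

		/* Always clear the object stack */

		walk_state->num_operands = 0;
		return_ACPI_STATUS(status);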
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -127,7 +127,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
char *path;
u32 flags;
ACPI_FUNCTION_NAME("ds_load1_begin_op");
ACPI_FUNCTION_TRACE("ds_load1_begin_op");
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
......@@ -138,14 +138,14 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
if (op) {
if (!(walk_state->op_info->flags & AML_NAMED)) {
*out_op = op;
return (AE_OK);
return_ACPI_STATUS(AE_OK);
}
/* Check if this object has already been installed in the namespace */
if (op->common.node) {
*out_op = op;
return (AE_OK);
return_ACPI_STATUS(AE_OK);
}
}
......@@ -187,8 +187,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
}
#endif
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(path, status);
return (status);
ACPI_ERROR_NAMESPACE(path, status);
return_ACPI_STATUS(status);
}
/*
......@@ -233,9 +233,11 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
/* All other types are an error */
ACPI_REPORT_ERROR(("Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)\n", acpi_ut_get_type_name(node->type), path));
ACPI_ERROR((AE_INFO,
"Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)",
acpi_ut_get_type_name(node->type), path));
return (AE_AML_OPERAND_TYPE);
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
break;
......@@ -257,6 +259,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* buffer_field, or Package), the name of the object is already
* in the namespace.
*/
if (walk_state->deferred_node) {
/* This name is already in the namespace, get the node */
......@@ -265,6 +268,16 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
break;
}
/*
* If we are executing a method, do not create any namespace objects
* during the load phase, only during execution.
*/
if (walk_state->method_node) {
node = NULL;
status = AE_OK;
break;
}
flags = ACPI_NS_NO_UPSEARCH;
if ((walk_state->opcode != AML_SCOPE_OP) &&
(!(walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP))) {
......@@ -289,8 +302,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
ACPI_IMODE_LOAD_PASS1, flags, walk_state,
&(node));
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(path, status);
return (status);
ACPI_ERROR_NAMESPACE(path, status);
return_ACPI_STATUS(status);
}
break;
}
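The same guard recurs through the remaining load-pass hunks (fields, regions, methods, and scope handling below): when walk_state->method_node is set, a control method is executing, and namespace objects must not be created during the load phase, only during execution. The shape, as it appears above:

	/* Recurring guard: while executing a method, defer namespace-object
	 * creation from the load phase to the execution phase */

	if (walk_state->method_node) {
		node = NULL;
		status = AE_OK;
		break;
	}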
......@@ -302,28 +315,29 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
op = acpi_ps_alloc_op(walk_state->opcode);
if (!op) {
return (AE_NO_MEMORY);
return_ACPI_STATUS(AE_NO_MEMORY);
}
}
/* Initialize */
op->named.name = node->name.integer;
/* Initialize the op */
#if (defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY))
op->named.path = (u8 *) path;
op->named.path = ACPI_CAST_PTR(u8, path);
#endif
/*
* Put the Node in the "op" object that the parser uses, so we
* can get it again quickly when this scope is closed
*/
op->common.node = node;
if (node) {
/*
* Put the Node in the "op" object that the parser uses, so we
* can get it again quickly when this scope is closed
*/
op->common.node = node;
op->named.name = node->name.integer;
}
acpi_ps_append_arg(acpi_ps_get_parent_scope(&walk_state->parser_state),
op);
*out_op = op;
return (status);
return_ACPI_STATUS(status);
}
/*******************************************************************************
......@@ -339,13 +353,13 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
*
******************************************************************************/
acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
{
union acpi_parse_object *op;
acpi_object_type object_type;
acpi_status status = AE_OK;
ACPI_FUNCTION_NAME("ds_load1_end_op");
ACPI_FUNCTION_TRACE("ds_load1_end_op");
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
......@@ -354,7 +368,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
/* We are only interested in opcodes that have an associated name */
if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
return (AE_OK);
return_ACPI_STATUS(AE_OK);
}
/* Get the object type to determine if we should pop the scope */
......@@ -363,21 +377,37 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
#ifndef ACPI_NO_METHOD_EXECUTION
if (walk_state->op_info->flags & AML_FIELD) {
if (walk_state->opcode == AML_FIELD_OP ||
walk_state->opcode == AML_BANK_FIELD_OP ||
walk_state->opcode == AML_INDEX_FIELD_OP) {
status = acpi_ds_init_field_objects(op, walk_state);
/*
* If we are executing a method, do not create any namespace objects
* during the load phase, only during execution.
*/
if (!walk_state->method_node) {
if (walk_state->opcode == AML_FIELD_OP ||
walk_state->opcode == AML_BANK_FIELD_OP ||
walk_state->opcode == AML_INDEX_FIELD_OP) {
status =
acpi_ds_init_field_objects(op, walk_state);
}
}
return (status);
return_ACPI_STATUS(status);
}
if (op->common.aml_opcode == AML_REGION_OP) {
status = acpi_ex_create_region(op->named.data, op->named.length,
(acpi_adr_space_type)
((op->common.value.arg)->common.
value.integer), walk_state);
if (ACPI_FAILURE(status)) {
return (status);
/*
* If we are executing a method, do not create any namespace objects
* during the load phase, only during execution.
*/
if (!walk_state->method_node) {
if (op->common.aml_opcode == AML_REGION_OP) {
status =
acpi_ex_create_region(op->named.data,
op->named.length,
(acpi_adr_space_type)
((op->common.value.arg)->
common.value.integer),
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
}
#endif
......@@ -391,47 +421,63 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
common.
aml_opcode))->
object_type;
op->common.node->type = (u8) object_type;
/* Set node type if we have a namespace node */
if (op->common.node) {
op->common.node->type = (u8) object_type;
}
}
}
if (op->common.aml_opcode == AML_METHOD_OP) {
/*
* method_op pkg_length name_string method_flags term_list
*
* Note: We must create the method node/object pair as soon as we
* see the method declaration. This allows later pass1 parsing
* of invocations of the method (need to know the number of
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"LOADING-Method: State=%p Op=%p named_obj=%p\n",
walk_state, op, op->named.node));
/*
* If we are executing a method, do not create any namespace objects
* during the load phase, only during execution.
*/
if (!walk_state->method_node) {
if (op->common.aml_opcode == AML_METHOD_OP) {
/*
* method_op pkg_length name_string method_flags term_list
*
* Note: We must create the method node/object pair as soon as we
* see the method declaration. This allows later pass1 parsing
* of invocations of the method (need to know the number of
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"LOADING-Method: State=%p Op=%p named_obj=%p\n",
walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) {
walk_state->operands[0] = (void *)op->named.node;
walk_state->num_operands = 1;
if (!acpi_ns_get_attached_object(op->named.node)) {
walk_state->operands[0] =
ACPI_CAST_PTR(void, op->named.node);
walk_state->num_operands = 1;
status =
acpi_ds_create_operands(walk_state,
op->common.value.arg);
if (ACPI_SUCCESS(status)) {
status = acpi_ex_create_method(op->named.data,
op->named.length,
walk_state);
}
walk_state->operands[0] = NULL;
walk_state->num_operands = 0;
status =
acpi_ds_create_operands(walk_state,
op->common.value.
arg);
if (ACPI_SUCCESS(status)) {
status =
acpi_ex_create_method(op->named.
data,
op->named.
length,
walk_state);
}
walk_state->operands[0] = NULL;
walk_state->num_operands = 0;
if (ACPI_FAILURE(status)) {
return (status);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
}
}
/* Pop the scope stack */
/* Pop the scope stack (only if loading a table) */
if (acpi_ns_opens_scope(object_type)) {
if (!walk_state->method_node && acpi_ns_opens_scope(object_type)) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"(%s): Popping scope for Op %p\n",
acpi_ut_get_type_name(object_type), op));
......@@ -439,7 +485,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
status = acpi_ds_scope_stack_pop(walk_state);
}
return (status);
return_ACPI_STATUS(status);
}
/*******************************************************************************
......@@ -456,8 +502,8 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
******************************************************************************/
acpi_status
acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state,
union acpi_parse_object ** out_op)
acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op)
{
union acpi_parse_object *op;
struct acpi_namespace_node *node;
......@@ -574,10 +620,10 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state,
if (status == AE_NOT_FOUND) {
status = AE_OK;
} else {
ACPI_REPORT_NSERROR(buffer_ptr, status);
ACPI_ERROR_NAMESPACE(buffer_ptr, status);
}
#else
ACPI_REPORT_NSERROR(buffer_ptr, status);
ACPI_ERROR_NAMESPACE(buffer_ptr, status);
#endif
return_ACPI_STATUS(status);
}
......@@ -607,7 +653,10 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state,
* Scope (DEB) { ... }
*/
ACPI_REPORT_WARNING(("Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n", buffer_ptr, acpi_ut_get_type_name(node->type)));
ACPI_WARNING((AE_INFO,
"Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)",
buffer_ptr,
acpi_ut_get_type_name(node->type)));
node->type = ACPI_TYPE_ANY;
walk_state->scope_info->common.value = ACPI_TYPE_ANY;
......@@ -617,7 +666,10 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state,
/* All other types are an error */
ACPI_REPORT_ERROR(("Invalid type (%s) for target of Scope operator [%4.4s]\n", acpi_ut_get_type_name(node->type), buffer_ptr));
ACPI_ERROR((AE_INFO,
"Invalid type (%s) for target of Scope operator [%4.4s]",
acpi_ut_get_type_name(node->type),
buffer_ptr));
return (AE_AML_OPERAND_TYPE);
}
......@@ -670,7 +722,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state,
}
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(buffer_ptr, status);
ACPI_ERROR_NAMESPACE(buffer_ptr, status);
return_ACPI_STATUS(status);
}
......@@ -840,6 +892,13 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_NAMED_FIELD:
/*
* If we are executing a method, initialize the field
*/
if (walk_state->method_node) {
status = acpi_ds_init_field_objects(op, walk_state);
}
switch (op->common.aml_opcode) {
case AML_INDEX_FIELD_OP:
......@@ -929,6 +988,24 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
switch (op->common.aml_opcode) {
#ifndef ACPI_NO_METHOD_EXECUTION
case AML_REGION_OP:
/*
* If we are executing a method, initialize the region
*/
if (walk_state->method_node) {
status =
acpi_ex_create_region(op->named.data,
op->named.length,
(acpi_adr_space_type)
((op->common.value.
arg)->common.value.
integer),
walk_state);
if (ACPI_FAILURE(status)) {
return (status);
}
}
/*
* The op_region is not fully parsed at this time. Only valid
* argument is the space_id. (We must save the address of the
......@@ -957,11 +1034,50 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
status = acpi_ds_create_node(walk_state, node, op);
break;
case AML_METHOD_OP:
/*
* method_op pkg_length name_string method_flags term_list
*
* Note: We must create the method node/object pair as soon as we
* see the method declaration. This allows later pass1 parsing
* of invocations of the method (need to know the number of
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"LOADING-Method: State=%p Op=%p named_obj=%p\n",
walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) {
walk_state->operands[0] =
ACPI_CAST_PTR(void, op->named.node);
walk_state->num_operands = 1;
status =
acpi_ds_create_operands(walk_state,
op->common.value.
arg);
if (ACPI_SUCCESS(status)) {
status =
acpi_ex_create_method(op->named.
data,
op->named.
length,
walk_state);
}
walk_state->operands[0] = NULL;
walk_state->num_operands = 0;
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
break;
#endif /* ACPI_NO_METHOD_EXECUTION */
default:
/* All NAMED_COMPLEX opcodes must be handled above */
/* Note: Method objects were already created in Pass 1 */
break;
}
break;
......@@ -1004,7 +1120,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
*/
op->common.node = new_node;
} else {
ACPI_REPORT_NSERROR(arg->common.value.string, status);
ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
}
break;
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -107,14 +107,14 @@ acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
if (!node) {
/* Invalid scope */
ACPI_REPORT_ERROR(("ds_scope_stack_push: null scope passed\n"));
ACPI_ERROR((AE_INFO, "Null scope parameter"));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Make sure object type is valid */
if (!acpi_ut_valid_object_type(type)) {
ACPI_REPORT_WARNING(("ds_scope_stack_push: Invalid object type: 0x%X\n", type));
ACPI_WARNING((AE_INFO, "Invalid object type: 0x%X", type));
}
/* Allocate a new scope object */
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -92,26 +92,23 @@ acpi_ds_result_remove(union acpi_operand_object **object,
state = walk_state->results;
if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result object pushed! State=%p\n",
walk_state));
ACPI_ERROR((AE_INFO, "No result object pushed! State=%p",
walk_state));
return (AE_NOT_EXIST);
}
if (index >= ACPI_OBJ_MAX_OPERAND) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Index out of range: %X State=%p Num=%X\n",
index, walk_state,
state->results.num_results));
ACPI_ERROR((AE_INFO,
"Index out of range: %X State=%p Num=%X",
index, walk_state, state->results.num_results));
}
/* Check for a valid result object */
if (!state->results.obj_desc[index]) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null operand! State=%p #Ops=%X, Index=%X\n",
walk_state, state->results.num_results,
index));
ACPI_ERROR((AE_INFO,
"Null operand! State=%p #Ops=%X, Index=%X",
walk_state, state->results.num_results, index));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -163,9 +160,8 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
}
if (!state->results.num_results) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Result stack is empty! State=%p\n",
walk_state));
ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p",
walk_state));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -192,8 +188,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
}
}
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result objects! State=%p\n", walk_state));
ACPI_ERROR((AE_INFO, "No result objects! State=%p", walk_state));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -222,15 +217,14 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
state = walk_state->results;
if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Warning: No result object pushed! State=%p\n",
walk_state));
ACPI_ERROR((AE_INFO,
"No result object pushed! State=%p", walk_state));
return (AE_NOT_EXIST);
}
if (!state->results.num_results) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result objects! State=%p\n", walk_state));
ACPI_ERROR((AE_INFO, "No result objects! State=%p",
walk_state));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -250,10 +244,10 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
/* Check for a valid result object */
if (!*object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null operand! State=%p #Ops=%X Index=%X\n",
walk_state, state->results.num_results,
(u32) index));
ACPI_ERROR((AE_INFO,
"Null operand! State=%p #Ops=%X Index=%X",
walk_state, state->results.num_results,
(u32) index));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -288,23 +282,21 @@ acpi_ds_result_push(union acpi_operand_object * object,
state = walk_state->results;
if (!state) {
ACPI_REPORT_ERROR(("No result stack frame during push\n"));
ACPI_ERROR((AE_INFO, "No result stack frame during push"));
return (AE_AML_INTERNAL);
}
if (state->results.num_results == ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Result stack overflow: Obj=%p State=%p Num=%X\n",
object, walk_state,
state->results.num_results));
ACPI_ERROR((AE_INFO,
"Result stack overflow: Obj=%p State=%p Num=%X",
object, walk_state, state->results.num_results));
return (AE_STACK_OVERFLOW);
}
if (!object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null Object! Obj=%p State=%p Num=%X\n",
object, walk_state,
state->results.num_results));
ACPI_ERROR((AE_INFO,
"Null Object! Obj=%p State=%p Num=%X",
object, walk_state, state->results.num_results));
return (AE_BAD_PARAMETER);
}
......@@ -413,10 +405,9 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
/* Check for stack overflow */
if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"overflow! Obj=%p State=%p #Ops=%X\n",
object, walk_state,
walk_state->num_operands));
ACPI_ERROR((AE_INFO,
"Object stack overflow! Obj=%p State=%p #Ops=%X",
object, walk_state, walk_state->num_operands));
return (AE_STACK_OVERFLOW);
}
......@@ -460,10 +451,10 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Underflow! Count=%X State=%p #Ops=%X\n",
pop_count, walk_state,
walk_state->num_operands));
ACPI_ERROR((AE_INFO,
"Object stack underflow! Count=%X State=%p #Ops=%X",
pop_count, walk_state,
walk_state->num_operands));
return (AE_STACK_UNDERFLOW);
}
......@@ -506,10 +497,10 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Underflow! Count=%X State=%p #Ops=%X\n",
pop_count, walk_state,
walk_state->num_operands));
ACPI_ERROR((AE_INFO,
"Object stack underflow! Count=%X State=%p #Ops=%X",
pop_count, walk_state,
walk_state->num_operands));
return (AE_STACK_UNDERFLOW);
}
......@@ -826,16 +817,14 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
}
if (walk_state->data_type != ACPI_DESC_TYPE_WALK) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%p is not a valid walk state\n",
walk_state));
ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
walk_state));
return;
}
if (walk_state->parser_state.scope) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%p walk still has a scope list\n",
walk_state));
ACPI_ERROR((AE_INFO, "%p walk still has a scope list",
walk_state));
}
/* Always must free any linked control states */
......@@ -894,25 +883,24 @@ acpi_ds_result_insert(void *object,
state = walk_state->results;
if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result object pushed! State=%p\n",
walk_state));
ACPI_ERROR((AE_INFO, "No result object pushed! State=%p",
walk_state));
return (AE_NOT_EXIST);
}
if (index >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Index out of range: %X Obj=%p State=%p Num=%X\n",
index, object, walk_state,
state->results.num_results));
ACPI_ERROR((AE_INFO,
"Index out of range: %X Obj=%p State=%p Num=%X",
index, object, walk_state,
state->results.num_results));
return (AE_BAD_PARAMETER);
}
if (!object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null Object! Index=%X Obj=%p State=%p Num=%X\n",
index, object, walk_state,
state->results.num_results));
ACPI_ERROR((AE_INFO,
"Null Object! Index=%X Obj=%p State=%p Num=%X",
index, object, walk_state,
state->results.num_results));
return (AE_BAD_PARAMETER);
}
......@@ -986,9 +974,9 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Missing operand/stack empty! State=%p #Ops=%X\n",
walk_state, walk_state->num_operands));
ACPI_ERROR((AE_INFO,
"Missing operand/stack empty! State=%p #Ops=%X",
walk_state, walk_state->num_operands));
*object = NULL;
return (AE_AML_NO_OPERAND);
}
......@@ -1000,9 +988,9 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
/* Check for a valid operand */
if (!walk_state->operands[walk_state->num_operands]) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null operand! State=%p #Ops=%X\n",
walk_state, walk_state->num_operands));
ACPI_ERROR((AE_INFO,
"Null operand! State=%p #Ops=%X",
walk_state, walk_state->num_operands));
*object = NULL;
return (AE_AML_NO_OPERAND);
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -73,7 +73,7 @@ acpi_status acpi_ev_initialize_events(void)
/* Make sure we have ACPI tables */
if (!acpi_gbl_DSDT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, "No ACPI tables present!\n"));
ACPI_WARNING((AE_INFO, "No ACPI tables present!"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
......@@ -84,20 +84,63 @@ acpi_status acpi_ev_initialize_events(void)
*/
status = acpi_ev_fixed_event_initialize();
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to initialize fixed events, %s\n",
acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Unable to initialize fixed events"));
return_ACPI_STATUS(status);
}
status = acpi_ev_gpe_initialize();
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to initialize general purpose events, %s\n", acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Unable to initialize general purpose events"));
return_ACPI_STATUS(status);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_install_fadt_gpes
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
* (0 and 1). This causes the _PRW methods to be run, so the HW
* must be fully initialized at this point, including global lock
* support.
*
******************************************************************************/
acpi_status acpi_ev_install_fadt_gpes(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_install_fadt_gpes");
/* Namespace must be locked */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return (status);
}
/* FADT GPE Block 0 */
(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
acpi_gbl_gpe_fadt_blocks[0]);
/* FADT GPE Block 1 */
(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
acpi_gbl_gpe_fadt_blocks[1]);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
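Because the _PRW methods execute inside this function, it can only be called once the namespace is loaded and the hardware, including global lock support, is fully initialized. A hypothetical late-initialization call site (the actual caller is outside this diff):

	status = acpi_ev_install_fadt_gpes();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}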
/*******************************************************************************
*
* FUNCTION: acpi_ev_install_xrupt_handlers
......@@ -120,7 +163,8 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
status = acpi_ev_install_sci_handler();
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to install System Control Interrupt Handler, %s\n", acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Unable to install System Control Interrupt handler"));
return_ACPI_STATUS(status);
}
......@@ -128,7 +172,8 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
status = acpi_ev_init_global_lock_handler();
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to initialize Global Lock handler, %s\n", acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Unable to initialize Global Lock handler"));
return_ACPI_STATUS(status);
}
......@@ -262,7 +307,9 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
enable_register_id, 0,
ACPI_MTX_DO_NOT_LOCK);
ACPI_REPORT_ERROR(("No installed handler for fixed event [%08X]\n", event));
ACPI_ERROR((AE_INFO,
"No installed handler for fixed event [%08X]",
event));
return (ACPI_INTERRUPT_NOT_HANDLED);
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -372,14 +372,14 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_register_info *gpe_register_info;
u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
u8 enabled_status_byte;
struct acpi_gpe_register_info *gpe_register_info;
u32 status_reg;
u32 enable_reg;
u32 flags;
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
acpi_cpu_flags flags;
acpi_native_uint i;
acpi_native_uint j;
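The u32 flags locals throughout this file become acpi_cpu_flags. A sketch of the assumed rationale: the value is the saved interrupt state returned by acpi_os_acquire_lock(), which is host-sized rather than fixed at 32 bits (lock name as assumed from ACPICA of this era):

	acpi_cpu_flags flags;

	/* The cookie round-trips through the OS lock primitives, so it
	 * must use the OS-defined width */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* ... walk or modify the GPE lists ... */

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);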
......@@ -546,7 +546,11 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
status = acpi_ns_evaluate_by_handle(&info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("%s while evaluating method [%4.4s] for GPE[%2X]\n", acpi_format_exception(status), acpi_ut_get_node_name(local_gpe_event_info.dispatch.method_node), gpe_number));
ACPI_EXCEPTION((AE_INFO, status,
"While evaluating method [%4.4s] for GPE[%2X]",
acpi_ut_get_node_name
(local_gpe_event_info.dispatch.
method_node), gpe_number));
}
}
......@@ -599,8 +603,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
ACPI_GPE_EDGE_TRIGGERED) {
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to clear GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_EXCEPTION((AE_INFO, status,
"Unable to clear GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
}
......@@ -637,8 +643,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
ACPI_GPE_LEVEL_TRIGGERED) {
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to clear GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_EXCEPTION((AE_INFO, status,
"Unable to clear GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
}
break;
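The two hunks above preserve an ordering detail: an edge-triggered GPE is cleared before dispatch, since the event is latched in the status register, while a level-triggered GPE is cleared only after the source has been serviced, so the line does not immediately re-assert. Condensed, assuming the flag test uses ACPI_GPE_XRUPT_TYPE_MASK as elsewhere in ACPICA:

	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info); /* clear before dispatch */
	}

	/* ... invoke the installed handler or queue the _Lxx/_Exx method ... */

	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info); /* clear after servicing */
	}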
......@@ -651,8 +659,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to disable GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
/*
......@@ -663,7 +673,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
acpi_ev_asynch_execute_gpe_method,
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to queue handler for GPE[%2X] - event disabled\n", acpi_format_exception(status), gpe_number));
ACPI_EXCEPTION((AE_INFO, status,
"Unable to queue handler for GPE[%2X] - event disabled",
gpe_number));
}
break;
......@@ -671,7 +683,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
/* No handler or method to run! */
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: No handler or method for GPE[%2X], disabling event\n", gpe_number));
ACPI_ERROR((AE_INFO,
"No handler or method for GPE[%2X], disabling event",
gpe_number));
/*
* Disable the GPE. The GPE will remain disabled until the ACPI
......@@ -679,13 +693,15 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to disable GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
break;
}
return_VALUE(ACPI_INTERRUPT_HANDLED);
return_UINT32(ACPI_INTERRUPT_HANDLED);
}
#ifdef ACPI_GPE_NOTIFY_CHECK
......@@ -722,7 +738,9 @@ acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
ACPI_REPORT_INFO(("GPE %p was updated from wake/run to wake-only\n", gpe_event_info));
ACPI_INFO((AE_INFO,
"GPE %p was updated from wake/run to wake-only",
gpe_event_info));
/* This was a wake-only GPE */
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -78,7 +78,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
*
* RETURN: TRUE if the gpe_event is valid
*
* DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
* Should be called only when the GPE lists are semaphore locked
* and not subject to change.
*
......@@ -136,7 +136,7 @@ acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback)
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
acpi_status status = AE_OK;
u32 flags;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_walk_gpe_list");
......@@ -264,7 +264,7 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
* 2) Edge/Level determination is based on the 2nd character
* of the method name
*
* NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
* if a _PRW object is found that points to this GPE.
*/
switch (name[1]) {
......@@ -279,9 +279,9 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
default:
/* Unknown method type, just ignore it! */
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown GPE method type: %s (name not of form _Lxx or _Exx)\n",
name));
ACPI_ERROR((AE_INFO,
"Unknown GPE method type: %s (name not of form _Lxx or _Exx)",
name));
return_ACPI_STATUS(AE_OK);
}
......@@ -291,9 +291,9 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
if (gpe_number == ACPI_UINT32_MAX) {
/* Conversion failed; invalid method, just ignore it */
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)\n",
name));
ACPI_ERROR((AE_INFO,
"Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
name));
return_ACPI_STATUS(AE_OK);
}
......@@ -313,14 +313,14 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
/*
* Now we can add this information to the gpe_event_info block
* for use during dispatch of this GPE. Default type is RUNTIME, although
* this may change when the _PRW methods are executed later.
*/
gpe_event_info =
&gpe_block->event_info[gpe_number - gpe_block->block_base_number];
gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD |
ACPI_GPE_TYPE_RUNTIME);
gpe_event_info->flags = (u8)
(type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);
gpe_event_info->dispatch.method_node =
(struct acpi_namespace_node *)obj_handle;
......@@ -341,11 +341,11 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
*
* PARAMETERS: Callback from walk_namespace
*
* RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
* not aborted on a single _PRW failure.
*
* DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
* Device. Run the _PRW method. If present, extract the GPE
* number and mark the GPE as a WAKE GPE.
*
******************************************************************************/
......@@ -443,6 +443,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
gpe_event_info->flags &=
~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);
status =
acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
if (ACPI_FAILURE(status)) {
......@@ -466,7 +467,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
*
* RETURN: A GPE interrupt block
*
* DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
* block per unique interrupt level used for GPEs.
* Should be called only when the GPE lists are semaphore locked
* and not subject to change.
......@@ -479,7 +480,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
struct acpi_gpe_xrupt_info *next_gpe_xrupt;
struct acpi_gpe_xrupt_info *gpe_xrupt;
acpi_status status;
u32 flags;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_get_gpe_xrupt_block");
......@@ -526,9 +527,9 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
acpi_ev_gpe_xrupt_handler,
gpe_xrupt);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not install GPE interrupt handler at level 0x%X\n",
interrupt_number));
ACPI_ERROR((AE_INFO,
"Could not install GPE interrupt handler at level 0x%X",
interrupt_number));
return_PTR(NULL);
}
}
......@@ -553,7 +554,7 @@ static acpi_status
acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
acpi_status status;
u32 flags;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_delete_gpe_xrupt");
......@@ -566,8 +567,9 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
/* Disable this interrupt */
status = acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
acpi_ev_gpe_xrupt_handler);
status =
acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
acpi_ev_gpe_xrupt_handler);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
......@@ -610,7 +612,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
struct acpi_gpe_block_info *next_gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_block;
acpi_status status;
u32 flags;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_install_gpe_block");
......@@ -663,7 +665,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{
acpi_status status;
u32 flags;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_install_gpe_block");
......@@ -743,22 +745,22 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
sizeof(struct
acpi_gpe_register_info));
if (!gpe_register_info) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not allocate the gpe_register_info table\n"));
ACPI_ERROR((AE_INFO,
"Could not allocate the gpe_register_info table"));
return_ACPI_STATUS(AE_NO_MEMORY);
}
/*
* Allocate the GPE event_info block. There are eight distinct GPEs
* per register. Initialization to zeros is sufficient.
*/
gpe_event_info = ACPI_MEM_CALLOCATE(((acpi_size) gpe_block->
register_count *
ACPI_GPE_REGISTER_WIDTH) *
sizeof(struct acpi_gpe_event_info));
if (!gpe_event_info) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not allocate the gpe_event_info table\n"));
ACPI_ERROR((AE_INFO,
"Could not allocate the gpe_event_info table"));
status = AE_NO_MEMORY;
goto error_exit;
}
......@@ -769,9 +771,9 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
gpe_block->event_info = gpe_event_info;
/*
* Initialize the GPE Register and Event structures. A goal of these
* Initialize the GPE Register and Event structures. A goal of these
* in a given gpe hardware block, the status registers occupy the first half,
* in a given GPE hardware block, the status registers occupy the first half,
* and the enable registers occupy the second half.
*/
this_register = gpe_register_info;
......@@ -812,11 +814,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
this_event++;
}
/*
* Clear the status/enable registers. Note that status registers
* are cleared by writing a '1', while enable registers are cleared
* by writing a '0'.
*/
/* Disable all GPEs within this register */
status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00,
&this_register->
enable_address);
......@@ -824,6 +823,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
goto error_exit;
}
/* Clear any pending GPE events within this register */
status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF,
&this_register->
status_address);
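The shortened comments above drop the rationale the removed comment carried: enable registers disable all eight GPEs in the register when written with zeros, while status registers are write-one-to-clear, which is why the two writes use 0x00 and 0xFF respectively. In sketch form:

	/* Enable register: writing 0x00 disables all eight GPEs */
	(void)acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00,
				      &this_register->enable_address);

	/* Status register: write-one-to-clear; 0xFF clears any latched events */
	(void)acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF,
				      &this_register->status_address);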
......@@ -860,7 +861,9 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
*
* RETURN: Status
*
* DESCRIPTION: Create and Install a block of GPE registers
* DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
* the block are disabled at exit.
* Note: Assumes namespace is locked.
*
******************************************************************************/
......@@ -872,14 +875,8 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
u32 interrupt_number,
struct acpi_gpe_block_info **return_gpe_block)
{
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_event_info *gpe_event_info;
acpi_native_uint i;
acpi_native_uint j;
u32 wake_gpe_count;
u32 gpe_enabled_count;
acpi_status status;
struct acpi_gpe_walk_info gpe_info;
struct acpi_gpe_block_info *gpe_block;
ACPI_FUNCTION_TRACE("ev_create_gpe_block");
......@@ -896,22 +893,24 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
/* Initialize the new GPE block */
gpe_block->node = gpe_device;
gpe_block->register_count = register_count;
gpe_block->block_base_number = gpe_block_base_number;
gpe_block->node = gpe_device;
ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
sizeof(struct acpi_generic_address));
/* Create the register_info and event_info sub-structures */
/*
* Create the register_info and event_info sub-structures
* Note: disables and clears all GPEs in the block
*/
status = acpi_ev_create_gpe_info_blocks(gpe_block);
if (ACPI_FAILURE(status)) {
ACPI_MEM_FREE(gpe_block);
return_ACPI_STATUS(status);
}
/* Install the new block in the global list(s) */
/* Install the new block in the global lists */
status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
if (ACPI_FAILURE(status)) {
......@@ -926,16 +925,70 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
acpi_ev_save_method_info, gpe_block,
NULL);
/* Return the new block */
if (return_gpe_block) {
(*return_gpe_block) = gpe_block;
}
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
(u32) gpe_block->block_base_number,
(u32) (gpe_block->block_base_number +
((gpe_block->register_count *
ACPI_GPE_REGISTER_WIDTH) - 1)),
gpe_device->name.ascii, gpe_block->register_count,
interrupt_number));
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_initialize_gpe_block
*
* PARAMETERS: gpe_device - Handle to the parent GPE block
* gpe_block - Gpe Block info
*
* RETURN: Status
*
* DESCRIPTION: Initialize and enable a GPE block. First find and run any
* _PRW methods associated with the block, then enable the
* appropriate GPEs.
* Note: Assumes namespace is locked.
*
******************************************************************************/
acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_gpe_block_info *gpe_block)
{
acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_gpe_walk_info gpe_info;
u32 wake_gpe_count;
u32 gpe_enabled_count;
acpi_native_uint i;
acpi_native_uint j;
ACPI_FUNCTION_TRACE("ev_initialize_gpe_block");
/* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
if (!gpe_block) {
return_ACPI_STATUS(AE_OK);
}
/*
* Runtime option: Should Wake GPEs be enabled at runtime? The default
* is No, they should only be enabled just as the machine goes to sleep.
* Runtime option: Should wake GPEs be enabled at runtime? The default
* is no, they should only be enabled just as the machine goes to sleep.
*/
if (acpi_gbl_leave_wake_gpes_disabled) {
/*
* Differentiate RUNTIME vs WAKE GPEs, via the _PRW control methods.
* (Each GPE that has one or more _PRWs that reference it is by
* definition a WAKE GPE and will not be enabled while the machine
* is running.)
* Differentiate runtime vs wake GPEs, via the _PRW control methods.
* Each GPE that has one or more _PRWs that reference it is by
* definition a wake GPE and will not be enabled while the machine
* is running.
*/
gpe_info.gpe_block = gpe_block;
gpe_info.gpe_device = gpe_device;
......@@ -948,9 +1001,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
}
/*
* Enable all GPEs in this block that are 1) "runtime" or "run/wake" GPEs,
* and 2) have a corresponding _Lxx or _Exx method. All other GPEs must
* be enabled via the acpi_enable_gpe() external interface.
* Enable all GPEs in this block that have these attributes:
* 1) are "runtime" or "run/wake" GPEs, and
* 2) have a corresponding _Lxx or _Exx method
*
* Any other GPEs within this block must be enabled via the acpi_enable_gpe()
* external interface.
*/
wake_gpe_count = 0;
gpe_enabled_count = 0;
......@@ -976,32 +1032,19 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
}
}
/* Dump info about this GPE block */
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
(u32) gpe_block->block_base_number,
(u32) (gpe_block->block_base_number +
((gpe_block->register_count *
ACPI_GPE_REGISTER_WIDTH) - 1)),
gpe_device->name.ascii, gpe_block->register_count,
interrupt_number));
/* Enable all valid GPEs found above */
status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"Found %u Wake, Enabled %u Runtime GPEs in this block\n",
wake_gpe_count, gpe_enabled_count));
/* Return the new block */
/* Enable all valid runtime GPEs found above */
if (return_gpe_block) {
(*return_gpe_block) = gpe_block;
status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not enable GPEs in gpe_block %p",
gpe_block));
}
return_ACPI_STATUS(AE_OK);
return_ACPI_STATUS(status);
}
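Together with acpi_ev_create_gpe_block() above, this split yields a two-phase bring-up. A sketch of the sequence a caller follows, with argument order assumed from the prototype fragments shown in this diff:

	/* Phase 1: create the block; all GPEs within it are disabled and cleared */
	status = acpi_ev_create_gpe_block(gpe_device, gpe_block_address,
					  register_count, gpe_block_base_number,
					  interrupt_number, &gpe_block);

	/* Phase 2: later, with the namespace locked (e.g. from
	 * acpi_ev_install_fadt_gpes()), match the _PRW methods and enable
	 * the runtime GPEs that have _Lxx/_Exx handlers */
	status = acpi_ev_initialize_gpe_block(gpe_device, gpe_block);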
/*******************************************************************************
......@@ -1072,8 +1115,8 @@ acpi_status acpi_ev_gpe_initialize(void)
&acpi_gbl_gpe_fadt_blocks[0]);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not create GPE Block 0, %s\n",
acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Could not create GPE Block 0"));
}
}
......@@ -1086,7 +1129,12 @@ acpi_status acpi_ev_gpe_initialize(void)
if ((register_count0) &&
(gpe_number_max >= acpi_gbl_FADT->gpe1_base)) {
ACPI_REPORT_ERROR(("GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1\n", gpe_number_max, acpi_gbl_FADT->gpe1_base, acpi_gbl_FADT->gpe1_base + ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1)));
ACPI_ERROR((AE_INFO,
"GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
gpe_number_max, acpi_gbl_FADT->gpe1_base,
acpi_gbl_FADT->gpe1_base +
((register_count1 *
ACPI_GPE_REGISTER_WIDTH) - 1)));
/* Ignore GPE1 block by setting the register count to zero */
......@@ -1104,7 +1152,8 @@ acpi_status acpi_ev_gpe_initialize(void)
[1]);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not create GPE Block 1, %s\n", acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Could not create GPE Block 1"));
}
/*
......@@ -1130,7 +1179,9 @@ acpi_status acpi_ev_gpe_initialize(void)
/* Check for Max GPE number out-of-range */
if (gpe_number_max > ACPI_GPE_MAX) {
ACPI_REPORT_ERROR(("Maximum GPE number from FADT is too large: 0x%X\n", gpe_number_max));
ACPI_ERROR((AE_INFO,
"Maximum GPE number from FADT is too large: 0x%X",
gpe_number_max));
status = AE_BAD_VALUE;
goto cleanup;
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -303,7 +303,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context)
acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore,
acpi_gbl_global_lock_thread_count);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not signal Global Lock semaphore\n"));
ACPI_ERROR((AE_INFO,
"Could not signal Global Lock semaphore"));
}
}
}
......@@ -344,7 +345,8 @@ static u32 acpi_ev_global_lock_handler(void *context)
acpi_ev_global_lock_thread,
context);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not queue Global Lock thread, %s\n", acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Could not queue Global Lock thread"));
return (ACPI_INTERRUPT_NOT_HANDLED);
}
......@@ -384,7 +386,8 @@ acpi_status acpi_ev_init_global_lock_handler(void)
* with an error.
*/
if (status == AE_NO_HARDWARE_RESPONSE) {
ACPI_REPORT_ERROR(("No response from Global Lock hardware, disabling lock\n"));
ACPI_ERROR((AE_INFO,
"No response from Global Lock hardware, disabling lock"));
acpi_gbl_global_lock_present = FALSE;
status = AE_OK;
......@@ -480,7 +483,8 @@ acpi_status acpi_ev_release_global_lock(void)
ACPI_FUNCTION_TRACE("ev_release_global_lock");
if (!acpi_gbl_global_lock_thread_count) {
ACPI_REPORT_WARNING(("Cannot release HW Global Lock, it has not been acquired\n"));
ACPI_WARNING((AE_INFO,
"Cannot release HW Global Lock, it has not been acquired"));
return_ACPI_STATUS(AE_NOT_ACQUIRED);
}
......@@ -542,9 +546,9 @@ void acpi_ev_terminate(void)
for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
status = acpi_disable_event((u32) i, 0);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not disable fixed event %d\n",
(u32) i));
ACPI_ERROR((AE_INFO,
"Could not disable fixed event %d",
(u32) i));
}
}
......@@ -556,8 +560,7 @@ void acpi_ev_terminate(void)
status = acpi_ev_remove_sci_handler();
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not remove SCI handler\n"));
ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
}
}
......@@ -570,8 +573,7 @@ void acpi_ev_terminate(void)
if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
status = acpi_disable();
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"acpi_disable failed\n"));
ACPI_WARNING((AE_INFO, "acpi_disable failed"));
}
}
return_VOID;
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -295,12 +295,12 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
handler_desc = region_obj->region.handler;
if (!handler_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No handler for Region [%4.4s] (%p) [%s]\n",
acpi_ut_get_node_name(region_obj->region.
node), region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
ACPI_ERROR((AE_INFO,
"No handler for Region [%4.4s] (%p) [%s]",
acpi_ut_get_node_name(region_obj->region.node),
region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
return_ACPI_STATUS(AE_NOT_EXIST);
}
......@@ -317,12 +317,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
if (!region_setup) {
/* No initialization routine, exit with error */
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No init routine for region(%p) [%s]\n",
region_obj,
acpi_ut_get_region_name(region_obj->
region.
space_id)));
ACPI_ERROR((AE_INFO,
"No init routine for region(%p) [%s]",
region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
return_ACPI_STATUS(AE_NOT_EXIST);
}
......@@ -347,12 +346,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
/* Check for failure of the Region Setup */
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Region Init: %s [%s]\n",
acpi_format_exception(status),
acpi_ut_get_region_name(region_obj->
region.
space_id)));
ACPI_EXCEPTION((AE_INFO, status,
"During region initialization: [%s]",
acpi_ut_get_region_name(region_obj->
region.
space_id)));
return_ACPI_STATUS(status);
}
......@@ -406,10 +404,9 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
region_obj2->extra.region_context);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Handler for [%s] returned %s\n",
acpi_ut_get_region_name(region_obj->region.
space_id),
acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
acpi_ut_get_region_name(region_obj->region.
space_id)));
}
if (!
......@@ -501,12 +498,10 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
status = acpi_ev_execute_reg_method(region_obj, 0);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%s from region _REG, [%s]\n",
acpi_format_exception(status),
acpi_ut_get_region_name
(region_obj->region.
space_id)));
ACPI_EXCEPTION((AE_INFO, status,
"from region _REG, [%s]",
acpi_ut_get_region_name
(region_obj->region.space_id)));
}
if (acpi_ns_is_locked) {
......@@ -528,12 +523,10 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
/* Init routine may fail, Just ignore errors */
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%s from region init, [%s]\n",
acpi_format_exception(status),
acpi_ut_get_region_name
(region_obj->region.
space_id)));
ACPI_EXCEPTION((AE_INFO, status,
"from region init, [%s]",
acpi_ut_get_region_name
(region_obj->region.space_id)));
}
region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -233,7 +233,11 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*/
status = AE_OK;
} else {
ACPI_REPORT_ERROR(("Could not install pci_config handler for Root Bridge %4.4s, %s\n", acpi_ut_get_node_name(pci_root_node), acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO,
status,
"Could not install pci_config handler for Root Bridge %4.4s",
acpi_ut_get_node_name
(pci_root_node)));
}
}
break;
......
......@@ -6,7 +6,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -88,7 +88,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
return_VALUE(interrupt_handled);
return_UINT32(interrupt_handled);
}
/*******************************************************************************
......@@ -121,7 +121,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
return_VALUE(interrupt_handled);
return_UINT32(interrupt_handled);
}
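Both interrupt handlers now exit through return_UINT32 instead of the generic return_VALUE. A sketch of the assumed expansion (the real macro lives in the ACPICA output headers; in debug builds it records a typed function-exit trace, and in non-debug builds it presumably reduces to a plain return):

/* Assumed shape of the width-specific tracing return (debug build): */
#define return_UINT32(s) \
	do { \
		u32 _value = (s); \
		acpi_ut_value_exit(ACPI_DEBUG_PARAMETERS, (acpi_integer) _value); \
		return (_value); \
	} while (0)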
/******************************************************************************
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -143,8 +143,8 @@ acpi_install_fixed_event_handler(u32 event,
if (ACPI_SUCCESS(status))
status = acpi_enable_event(event, 0);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"Could not enable fixed event.\n"));
ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
event));
/* Remove the handler */
......@@ -204,10 +204,11 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
acpi_gbl_fixed_event_handlers[event].context = NULL;
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"Could not write to fixed event enable register.\n"));
ACPI_WARNING((AE_INFO,
"Could not write to fixed event enable register %X",
event));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X.\n",
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
event));
}
......@@ -434,7 +435,7 @@ acpi_remove_notify_handler(acpi_handle device,
if (device == ACPI_ROOT_OBJECT) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Removing notify handler for ROOT object.\n"));
"Removing notify handler for namespace root object\n"));
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
!acpi_gbl_system_notify.handler) ||
......@@ -562,7 +563,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler;
acpi_status status;
u32 flags;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("acpi_install_gpe_handler");
......@@ -653,7 +654,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler;
acpi_status status;
u32 flags;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("acpi_remove_gpe_handler");
......
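Both GPE handler entry points above retype their saved-interrupt variable from u32 to acpi_cpu_flags. A sketch of the motivation, with the typedef assumed from the ACPICA portability headers:

/* Assumed typedef: wide enough to hold saved interrupt state on any
 * host; a plain u32 could truncate it on 64-bit platforms. */
typedef acpi_native_uint acpi_cpu_flags;

acpi_cpu_flags flags;

flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);  /* returns saved flags */
/* ... modify the GPE handler list ... */
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);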
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -70,8 +70,7 @@ acpi_status acpi_enable(void)
/* Make sure we have the FADT */
if (!acpi_gbl_FADT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"No FADT information present!\n"));
ACPI_WARNING((AE_INFO, "No FADT information present!"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
......@@ -83,7 +82,8 @@ acpi_status acpi_enable(void)
status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not transition to ACPI mode.\n"));
ACPI_ERROR((AE_INFO,
"Could not transition to ACPI mode"));
return_ACPI_STATUS(status);
}
......@@ -113,8 +113,7 @@ acpi_status acpi_disable(void)
ACPI_FUNCTION_TRACE("acpi_disable");
if (!acpi_gbl_FADT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"No FADT information present!\n"));
ACPI_WARNING((AE_INFO, "No FADT information present!"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
......@@ -127,8 +126,8 @@ acpi_status acpi_disable(void)
status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not exit ACPI mode to legacy mode"));
ACPI_ERROR((AE_INFO,
"Could not exit ACPI mode to legacy mode"));
return_ACPI_STATUS(status);
}
......@@ -185,9 +184,9 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
}
if (value != 1) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not enable %s event\n",
acpi_ut_get_event_name(event)));
ACPI_ERROR((AE_INFO,
"Could not enable %s event",
acpi_ut_get_event_name(event)));
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
}
......@@ -384,9 +383,9 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
}
if (value != 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not disable %s events\n",
acpi_ut_get_event_name(event)));
ACPI_ERROR((AE_INFO,
"Could not disable %s events",
acpi_ut_get_event_name(event)));
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
}
......@@ -626,6 +625,13 @@ acpi_install_gpe_block(acpi_handle gpe_device,
goto unlock_and_exit;
}
/* Run the _PRW methods and enable the GPEs */
status = acpi_ev_initialize_gpe_block(node, gpe_block);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
/* Get the device_object attached to the node */
obj_desc = acpi_ns_get_attached_object(node);
......
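The added acpi_ev_initialize_gpe_block() call runs hot-installed GPE blocks through the same setup used at boot. An assumed outline of what the call does (its body is not part of this hunk):

/* Assumed outline of acpi_ev_initialize_gpe_block(node, gpe_block):
 * 1. Walk the namespace for _PRW (wake) methods referencing GPEs in
 *    this block and mark those GPEs wake-capable.
 * 2. Enable the remaining runtime GPEs, so a block installed after
 *    boot becomes live immediately. */
status = acpi_ev_initialize_gpe_block(node, gpe_block);
if (ACPI_FAILURE(status)) {
	goto unlock_and_exit;
}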
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -413,9 +413,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
(!ACPI_STRNCMP(table_ptr->signature,
acpi_gbl_table_data[ACPI_TABLE_SSDT].signature,
acpi_gbl_table_data[ACPI_TABLE_SSDT].sig_length))) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Table has invalid signature [%4.4s], must be SSDT or PSDT\n",
table_ptr->signature));
ACPI_ERROR((AE_INFO,
"Table has invalid signature [%4.4s], must be SSDT or PSDT",
table_ptr->signature));
status = AE_BAD_SIGNATURE;
goto cleanup;
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -504,18 +504,12 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
}
/*
* Perform the conversion.
* Create a new string object and string buffer
* (-1 because of extra separator included in string_length from above)
*/
string_length--;
if (string_length > ACPI_MAX_STRING_CONVERSION) { /* ACPI limit */
return_ACPI_STATUS(AE_AML_STRING_LIMIT);
}
/* Create a new string object and string buffer */
return_desc =
acpi_ut_create_string_object((acpi_size) string_length);
acpi_ut_create_string_object((acpi_size)
(string_length - 1));
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
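The rewrite folds the separator adjustment into the allocation call instead of decrementing string_length first, and drops the separate ACPI_MAX_STRING_CONVERSION clamp. A worked sketch of the arithmetic, with an illustrative byte count:

/* Hypothetical 4-byte buffer rendered as "AA,BB,CC,DD":
 * each byte contributes "XX," (3 chars), so the computed length
 * counts one trailing separator too many. */
u32 buffer_bytes = 4;
u32 string_length = buffer_bytes * 3;  /* 12, includes a trailing "," */

return_desc = acpi_ut_create_string_object((acpi_size)(string_length - 1));
/* 11 characters, plus the terminating NUL added by the allocator */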
......@@ -647,7 +641,9 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
break;
default:
ACPI_REPORT_ERROR(("Bad destination type during conversion: %X\n", destination_type));
ACPI_ERROR((AE_INFO,
"Bad destination type during conversion: %X",
destination_type));
status = AE_AML_INTERNAL;
break;
}
......@@ -660,17 +656,13 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown Target type ID 0x%X Op %s dest_type %s\n",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->op_info->name,
acpi_ut_get_type_name(destination_type)));
ACPI_REPORT_ERROR(("Bad Target Type (ARGI): %X\n",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args)))
status = AE_AML_INTERNAL;
ACPI_ERROR((AE_INFO,
"Unknown Target type ID 0x%X aml_opcode %X dest_type %s",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->opcode,
acpi_ut_get_type_name(destination_type)));
status = AE_AML_INTERNAL;
}
/*
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -300,8 +300,8 @@ acpi_ex_create_region(u8 * aml_start,
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
(region_space < ACPI_USER_REGION_BEGIN)) {
ACPI_REPORT_ERROR(("Invalid address_space type %X\n",
region_space));
ACPI_ERROR((AE_INFO, "Invalid address_space type %X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
......
This diff has been collapsed.
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -249,13 +249,18 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
* Source must be a buffer of sufficient size (ACPI_SMBUS_BUFFER_SIZE).
*/
if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) {
ACPI_REPORT_ERROR(("SMBus write requires Buffer, found type %s\n", acpi_ut_get_object_type_name(source_desc)));
ACPI_ERROR((AE_INFO,
"SMBus write requires Buffer, found type %s",
acpi_ut_get_object_type_name(source_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
if (source_desc->buffer.length < ACPI_SMBUS_BUFFER_SIZE) {
ACPI_REPORT_ERROR(("SMBus write requires Buffer of length %X, found length %X\n", ACPI_SMBUS_BUFFER_SIZE, source_desc->buffer.length));
ACPI_ERROR((AE_INFO,
"SMBus write requires Buffer of length %X, found length %X",
ACPI_SMBUS_BUFFER_SIZE,
source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -94,10 +94,9 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
/* We must have a valid region */
if (ACPI_GET_OBJECT_TYPE(rgn_desc) != ACPI_TYPE_REGION) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Needed Region, found type %X (%s)\n",
ACPI_GET_OBJECT_TYPE(rgn_desc),
acpi_ut_get_object_type_name(rgn_desc)));
ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)",
ACPI_GET_OBJECT_TYPE(rgn_desc),
acpi_ut_get_object_type_name(rgn_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
......@@ -162,31 +161,28 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* than the region itself. For example, a region of length one
* byte, and a field with Dword access specified.
*/
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)\n",
acpi_ut_get_node_name(obj_desc->
common_field.
node),
obj_desc->common_field.
access_byte_width,
acpi_ut_get_node_name(rgn_desc->
region.node),
rgn_desc->region.length));
ACPI_ERROR((AE_INFO,
"Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)",
acpi_ut_get_node_name(obj_desc->
common_field.node),
obj_desc->common_field.access_byte_width,
acpi_ut_get_node_name(rgn_desc->region.
node),
rgn_desc->region.length));
}
/*
* Offset rounded up to next multiple of field width
* exceeds region length, indicate an error
*/
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)\n",
acpi_ut_get_node_name(obj_desc->common_field.
node),
obj_desc->common_field.base_byte_offset,
field_datum_byte_offset,
obj_desc->common_field.access_byte_width,
acpi_ut_get_node_name(rgn_desc->region.node),
rgn_desc->region.length));
ACPI_ERROR((AE_INFO,
"Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)",
acpi_ut_get_node_name(obj_desc->common_field.node),
obj_desc->common_field.base_byte_offset,
field_datum_byte_offset,
obj_desc->common_field.access_byte_width,
acpi_ut_get_node_name(rgn_desc->region.node),
rgn_desc->region.length));
return_ACPI_STATUS(AE_AML_REGION_LIMIT);
}
......@@ -270,18 +266,17 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Region %s(%X) not implemented\n",
acpi_ut_get_region_name(rgn_desc->
region.
space_id),
rgn_desc->region.space_id));
ACPI_ERROR((AE_INFO,
"Region %s(%X) not implemented",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
} else if (status == AE_NOT_EXIST) {
ACPI_REPORT_ERROR(("Region %s(%X) has no handler\n",
acpi_ut_get_region_name(rgn_desc->
region.
space_id),
rgn_desc->region.space_id));
ACPI_ERROR((AE_INFO,
"Region %s(%X) has no handler",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
}
}
......@@ -514,8 +509,8 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
default:
ACPI_REPORT_ERROR(("Wrong object type in field I/O %X\n",
ACPI_GET_OBJECT_TYPE(obj_desc)));
ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X",
ACPI_GET_OBJECT_TYPE(obj_desc)));
status = AE_AML_INTERNAL;
break;
}
......@@ -618,11 +613,11 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"write_with_update_rule: Unknown update_rule setting: %X\n",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
ACPI_ERROR((AE_INFO,
"Unknown update_rule value: %X",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
}
......@@ -677,10 +672,9 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field size %X (bits) is too large for buffer (%X)\n",
obj_desc->common_field.bit_length,
buffer_length));
ACPI_ERROR((AE_INFO,
"Field size %X (bits) is too large for buffer (%X)",
obj_desc->common_field.bit_length, buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
}
......@@ -792,10 +786,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field size %X (bits) is too large for buffer (%X)\n",
obj_desc->common_field.bit_length,
buffer_length));
ACPI_ERROR((AE_INFO,
"Field size %X (bits) is too large for buffer (%X)",
obj_desc->common_field.bit_length, buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
}
......
This diff has been collapsed.
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -153,7 +153,9 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Sanity check -- we must have a valid thread ID */
if (!walk_state->thread) {
ACPI_REPORT_ERROR(("Cannot acquire Mutex [%4.4s], null thread info\n", acpi_ut_get_node_name(obj_desc->mutex.node)));
ACPI_ERROR((AE_INFO,
"Cannot acquire Mutex [%4.4s], null thread info",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
......@@ -162,7 +164,9 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
* mutex. This mechanism provides some deadlock prevention
*/
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
ACPI_REPORT_ERROR(("Cannot acquire Mutex [%4.4s], incorrect sync_level\n", acpi_ut_get_node_name(obj_desc->mutex.node)));
ACPI_ERROR((AE_INFO,
"Cannot acquire Mutex [%4.4s], incorrect sync_level",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
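The check above is AML's deadlock-prevention rule: a thread may only acquire mutexes in non-decreasing SyncLevel order (levels 0-15 in the ACPI specification). A minimal sketch of the ordering, using the field names from this hunk:

/* Holding a level-5 mutex, a thread may acquire level 5 or higher;
 * attempting a lower level would permit lock-order inversion. */
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
	return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
walk_state->thread->current_sync_level = obj_desc->mutex.sync_level;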
......@@ -237,14 +241,18 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* The mutex must have been previously acquired in order to release it */
if (!obj_desc->mutex.owner_thread) {
ACPI_REPORT_ERROR(("Cannot release Mutex [%4.4s], not acquired\n", acpi_ut_get_node_name(obj_desc->mutex.node)));
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], not acquired",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
}
/* Sanity check -- we must have a valid thread ID */
if (!walk_state->thread) {
ACPI_REPORT_ERROR(("Cannot release Mutex [%4.4s], null thread info\n", acpi_ut_get_node_name(obj_desc->mutex.node)));
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], null thread info",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
......@@ -255,7 +263,11 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
if ((obj_desc->mutex.owner_thread->thread_id !=
walk_state->thread->thread_id)
&& (obj_desc->mutex.semaphore != acpi_gbl_global_lock_semaphore)) {
ACPI_REPORT_ERROR(("Thread %X cannot release Mutex [%4.4s] acquired by thread %X\n", walk_state->thread->thread_id, acpi_ut_get_node_name(obj_desc->mutex.node), obj_desc->mutex.owner_thread->thread_id));
ACPI_ERROR((AE_INFO,
"Thread %X cannot release Mutex [%4.4s] acquired by thread %X",
walk_state->thread->thread_id,
acpi_ut_get_node_name(obj_desc->mutex.node),
obj_desc->mutex.owner_thread->thread_id));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
......@@ -264,7 +276,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* equal to the current sync level
*/
if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
ACPI_REPORT_ERROR(("Cannot release Mutex [%4.4s], incorrect sync_level\n", acpi_ut_get_node_name(obj_desc->mutex.node)));
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], incorrect sync_level",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
......
The diffs for 17 more files have been collapsed.
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
The diffs for 197 more files have been collapsed.