Commit 33052057 authored by Trond Myklebust
...@@ -452,6 +452,11 @@ running once the system is up.
	eata=		[HW,SCSI]
ec_intr= [HW,ACPI] ACPI Embedded Controller interrupt mode
Format: <int>
0: polling mode
non-0: interrupt mode (default)
	eda=		[HW,PS2]
	edb=		[HW,PS2]
......
...@@ -837,8 +837,10 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
    Module for AC'97 motherboards from Intel and compatibles.
			* Intel i810/810E, i815, i820, i830, i84x, MX440
			  ICH5, ICH6, ICH7, ESB2
			* SiS 7012 (SiS 735)
-			* NVidia NForce, NForce2
+			* NVidia NForce, NForce2, NForce3, MCP04, CK804
			  CK8, CK8S, MCP501
			* AMD AMD768, AMD8111
			* ALi m5455
...@@ -868,6 +870,12 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
    --------------------
    Module for Intel ICH (i8x0) chipset MC97 modems.
* Intel i810/810E, i815, i820, i830, i84x, MX440
ICH5, ICH6, ICH7
* SiS 7013 (SiS 735)
* NVidia NForce, NForce2, NForce2s, NForce3
* AMD AMD8111
* ALi m5455
    ac97_clock	  - AC'97 codec clock base (0 = auto-detect)
......
...@@ -5206,14 +5206,14 @@ struct _snd_pcm_runtime {
          You need to pass the <function>snd_dma_pci_data(pci)</function>,
          where pci is the struct <structname>pci_dev</structname> pointer
          of the chip as well.
-          The <type>snd_sg_buf_t</type> instance is created as
+          The <type>struct snd_sg_buf</type> instance is created as
          substream-&gt;dma_private. You can cast
          the pointer like:
          <informalexample>
            <programlisting>
<![CDATA[
-  struct snd_sg_buf *sgbuf = (struct snd_sg_buf_t*)substream->dma_private;
+  struct snd_sg_buf *sgbuf = (struct snd_sg_buf *)substream->dma_private;
]]>
            </programlisting>
          </informalexample>
......
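For additional context (not part of this commit): the scatter-gather buffer described in the DocBook hunk above is normally requested at PCM construction time, and the dma_private cast shown there only becomes meaningful after such a preallocation. A minimal sketch of that setup, with hypothetical card/chip names and sizes chosen purely for illustration:

#include <linux/pci.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/memalloc.h>

/* Illustrative sketch only: request SG-backed DMA buffers so that
 * substream->dma_private later points to a struct snd_sg_buf. */
static int example_pcm_new(struct snd_card *card, struct pci_dev *pci,
			   struct snd_pcm **rpcm)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(card, "Example", 0, 1, 1, &pcm);
	if (err < 0)
		return err;

	/* 64 kB initial size, 16 MB maximum, backed by SG pages */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
					      snd_dma_pci_data(pci),
					      64 * 1024, 16 * 1024 * 1024);
	*rpcm = pcm;
	return 0;
}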
...@@ -3,6 +3,6 @@ obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y				+= cstate.o
+obj-y				+= cstate.o processor.o
endif
...@@ -464,7 +464,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 * success: return IRQ number (>=0)
 * failure: return < 0
 */
-int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
	unsigned int irq;
	unsigned int plat_gsi = gsi;
...@@ -476,14 +476,14 @@ int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
		extern void eisa_set_level_irq(unsigned int irq);
-		if (edge_level == ACPI_LEVEL_SENSITIVE)
+		if (triggering == ACPI_LEVEL_SENSITIVE)
			eisa_set_level_irq(gsi);
	}
#endif
#ifdef CONFIG_X86_IO_APIC
	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
-		plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
+		plat_gsi = mp_register_gsi(gsi, triggering, polarity);
	}
#endif
	acpi_gsi_to_irq(plat_gsi, &irq);
......
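The parameter rename in the hunk above (edge_level/active_high_low to triggering/polarity) does not change the calling convention: callers still pass the ACPI trigger and polarity constants and get a Linux IRQ number (or a negative value) back. A purely illustrative call-site sketch, with a hypothetical helper and device, not part of this commit:

#include <linux/acpi.h>
#include <linux/pci.h>

/* Illustrative only: route a level-triggered, active-low GSI that ACPI
 * describes for a device to a Linux IRQ number. */
static int example_route_gsi(struct pci_dev *dev, u32 gsi)
{
	int irq = acpi_register_gsi(gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);

	if (irq < 0)
		return irq;	/* success: >= 0, failure: < 0 */
	dev->irq = irq;
	return 0;
}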
...@@ -14,64 +14,6 @@
#include <acpi/processor.h>
#include <asm/acpi.h>
static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power
*pow)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pow->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void acpi_processor_power_init_pdc(struct acpi_processor_power *pow,
unsigned int cpu)
{
struct cpuinfo_x86 *c = cpu_data + cpu;
pow->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL)
acpi_processor_power_init_intel_pdc(pow);
return;
}
EXPORT_SYMBOL(acpi_processor_power_init_pdc);
/*
 * Initialize bm_flags based on the CPU cache properties
 * On SMP it depends on cache configuration
......
/*
* arch/i386/kernel/acpi/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
if (cpu_has(c, X86_FEATURE_EST))
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
struct cpuinfo_x86 *c = cpu_data + cpu;
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL)
init_intel_pdc(pr, c);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
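
For orientation only (not part of this commit): the arch hook above merely prepares pr->pdc; it is the generic ACPI processor driver that later hands the buffer to the firmware by evaluating _PDC. A simplified, hypothetical sketch of that consuming side, assuming pr->handle and pr->pdc are already filled in:

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <acpi/processor.h>

/* Illustrative sketch only: pass the arch-prepared _PDC buffer to the
 * platform firmware; the real code lives in drivers/acpi/. */
static acpi_status example_set_pdc(struct acpi_processor *pr)
{
	acpi_status status = AE_OK;

	if (pr->pdc)
		status = acpi_evaluate_object(pr->handle, "_PDC", pr->pdc, NULL);

	return status;
}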
...@@ -295,68 +295,6 @@ acpi_cpufreq_guess_freq (
}
/*
* acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities
* of this driver
* @perf: processor-specific acpi_io_data struct
* @cpu: CPU being initialized
*
* To avoid issues with legacy OSes, some BIOSes require to be informed of
* the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
* accordingly, for Enhanced Speedstep. Actual call to _PDC is done in
* driver/acpi/processor.c
*/
static void
acpi_processor_cpu_init_pdc_est(
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
union acpi_object *obj;
u32 *buf;
struct cpuinfo_x86 *c = cpu_data + cpu;
dprintk("acpi_processor_cpu_init_pdc_est\n");
if (!cpu_has(c, X86_FEATURE_EST))
return;
/* Initialize pdc. It will be used later. */
if (!obj_list)
return;
if (!(obj_list->count && obj_list->pointer))
return;
obj = obj_list->pointer;
if ((obj->buffer.length == 12) && obj->buffer.pointer) {
buf = (u32 *)obj->buffer.pointer;
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
perf->pdc = obj_list;
}
return;
}
/* CPU specific PDC initialization */
static void
acpi_processor_cpu_init_pdc(
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
struct cpuinfo_x86 *c = cpu_data + cpu;
dprintk("acpi_processor_cpu_init_pdc\n");
perf->pdc = NULL;
if (cpu_has(c, X86_FEATURE_EST))
acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list);
return;
}
static int
acpi_cpufreq_cpu_init (
	struct cpufreq_policy *policy)
...@@ -367,14 +305,7 @@ acpi_cpufreq_cpu_init (
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
	dprintk("acpi_cpufreq_cpu_init\n");
/* setup arg_list for _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
	if (!data)
...@@ -382,9 +313,7 @@ acpi_cpufreq_cpu_init (
	acpi_io_data[cpu] = data;
acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list);
	result = acpi_processor_register_performance(&data->acpi_data, cpu);
data->acpi_data.pdc = NULL;
	if (result)
		goto err_free;
......
...@@ -362,22 +362,10 @@ static struct acpi_processor_performance p;
 */
static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
{
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
	unsigned long cur_freq;
	int result = 0, i;
	unsigned int cpu = policy->cpu;
/* _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
arg0_buf[0] = ACPI_PDC_REVISION_ID;
arg0_buf[1] = 1;
arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR;
p.pdc = &arg_list;
	/* register with ACPI core */
	if (acpi_processor_register_performance(&p, cpu)) {
		dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
......
...@@ -1080,7 +1080,7 @@ void __init mp_config_acpi_legacy_irqs (void)
#define MAX_GSI_NUM	4096
-int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi (u32 gsi, int triggering, int polarity)
{
	int ioapic = -1;
	int ioapic_pin = 0;
...@@ -1129,7 +1129,7 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
-	if (edge_level) {
+	if (triggering == ACPI_LEVEL_SENSITIVE) {
		/*
		 * For PCI devices assign IRQs in order, avoiding gaps
		 * due to unused I/O APIC pins.
...@@ -1151,8 +1151,8 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
	}
	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-		edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
-		active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
	return gsi;
}
......
...@@ -13,6 +13,11 @@ obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC)	+= acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1)	+= acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += acpi-processor.o
endif
obj-$(CONFIG_IA64_PALINFO)	+= palinfo.o
obj-$(CONFIG_IOSAPIC)		+= iosapic.o
obj-$(CONFIG_MODULES)		+= module.o
......
...@@ -33,33 +33,33 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
	struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
	struct acpi_resource_vendor *vendor;
	struct acpi_vendor_descriptor *descriptor;
-	u32 length;
+	u32 byte_length;
-	if (resource->id != ACPI_RSTYPE_VENDOR)
+	if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
		return AE_OK;
	vendor = (struct acpi_resource_vendor *)&resource->data;
-	descriptor = (struct acpi_vendor_descriptor *)vendor->reserved;
-	if (vendor->length <= sizeof(*info->descriptor) ||
+	descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
+	if (vendor->byte_length <= sizeof(*info->descriptor) ||
	    descriptor->guid_id != info->descriptor->guid_id ||
	    efi_guidcmp(descriptor->guid, info->descriptor->guid))
		return AE_OK;
-	length = vendor->length - sizeof(struct acpi_vendor_descriptor);
-	info->data = acpi_os_allocate(length);
+	byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
+	info->data = acpi_os_allocate(byte_length);
	if (!info->data)
		return AE_NO_MEMORY;
	memcpy(info->data,
-	       vendor->reserved + sizeof(struct acpi_vendor_descriptor),
-	       length);
-	info->length = length;
+	       vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
+	       byte_length);
+	info->length = byte_length;
	return AE_CTRL_TERMINATE;
}
acpi_status
acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
-			  u8 ** data, u32 * length)
+			  u8 ** data, u32 * byte_length)
{
	struct acpi_vendor_info info;
...@@ -72,7 +72,7 @@ acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
		return AE_NOT_FOUND;
	*data = info.data;
-	*length = info.length;
+	*byte_length = info.length;
	return AE_OK;
}
......
/*
* arch/ia64/kernel/cpufreq/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
pr->pdc = NULL;
init_intel_pdc(pr);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
...@@ -567,16 +567,16 @@ void __init acpi_numa_arch_fixup(void)
 * success: return IRQ number (>=0)
 * failure: return < 0
 */
-int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
	if (has_8259 && gsi < 16)
		return isa_irq_to_vector(gsi);
	return iosapic_register_intr(gsi,
-				     (active_high_low ==
+				     (polarity ==
				      ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
				     IOSAPIC_POL_LOW,
-				     (edge_level ==
+				     (triggering ==
				      ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
				     IOSAPIC_LEVEL);
}
......
obj-$(CONFIG_IA64_ACPI_CPUFREQ)	+= acpi-cpufreq.o
...@@ -269,48 +269,6 @@ acpi_cpufreq_verify (
}
/*
* processor_init_pdc - let BIOS know about the SMP capabilities
* of this driver
* @perf: processor-specific acpi_io_data struct
* @cpu: CPU being initialized
*
* To avoid issues with legacy OSes, some BIOSes require to be informed of
* the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
* accordingly. Actual call to _PDC is done in driver/acpi/processor.c
*/
static void
processor_init_pdc (
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
union acpi_object *obj;
u32 *buf;
dprintk("processor_init_pdc\n");
perf->pdc = NULL;
/* Initialize pdc. It will be used later. */
if (!obj_list)
return;
if (!(obj_list->count && obj_list->pointer))
return;
obj = obj_list->pointer;
if ((obj->buffer.length == 12) && obj->buffer.pointer) {
buf = (u32 *)obj->buffer.pointer;
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
perf->pdc = obj_list;
}
return;
}
static int
acpi_cpufreq_cpu_init (
	struct cpufreq_policy *policy)
...@@ -320,14 +278,7 @@ acpi_cpufreq_cpu_init (
	struct cpufreq_acpi_io *data;
	unsigned int result = 0;
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
	dprintk("acpi_cpufreq_cpu_init\n");
/* setup arg_list for _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
	data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
	if (!data)
...@@ -337,9 +288,7 @@ acpi_cpufreq_cpu_init (
	acpi_io_data[cpu] = data;
processor_init_pdc(&data->acpi_data, cpu, &arg_list);
	result = acpi_processor_register_performance(&data->acpi_data, cpu);
data->acpi_data.pdc = NULL;
	if (result)
		goto err_free;
......
...@@ -193,12 +193,12 @@ add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
		goto free_resource;
	}
-	min = addr->min_address_range;
+	min = addr->minimum;
	max = min + addr->address_length - 1;
-	if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
+	if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
		sparse = 1;
-	space_nr = new_space(addr->address_translation_offset, sparse);
+	space_nr = new_space(addr->translation_offset, sparse);
	if (space_nr == ~0)
		goto free_name;
...@@ -285,7 +285,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
-		offset = addr.address_translation_offset;
+		offset = addr.translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
...@@ -298,7 +298,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
	window = &info->controller->window[info->controller->windows++];
	window->resource.name = info->name;
	window->resource.flags = flags;
-	window->resource.start = addr.min_address_range + offset;
+	window->resource.start = addr.minimum + offset;
	window->resource.end = window->resource.start + addr.address_length - 1;
	window->resource.child = NULL;
	window->offset = offset;
......
...@@ -58,6 +58,7 @@ pcibios_find_pci_bus(struct device_node *dn)
	return find_bus_among_children(pdn->phb->bus, dn);
}
EXPORT_SYMBOL_GPL(pcibios_find_pci_bus);
/**
 * pcibios_remove_pci_devices - remove all devices under this bus
...@@ -106,6 +107,7 @@ pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
			}
		}
	}
EXPORT_SYMBOL_GPL(pcibios_fixup_new_pci_devices);
static int
pcibios_pci_config_bridge(struct pci_dev *dev)
...@@ -172,3 +174,4 @@ pcibios_add_pci_devices(struct pci_bus * bus)
			pcibios_pci_config_bridge(dev);
	}
}
EXPORT_SYMBOL_GPL(pcibios_add_pci_devices);
obj-y			:= boot.o
boot-y			:= ../../../i386/kernel/acpi/boot.o
obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += processor.o
endif
/*
* arch/x86_64/kernel/acpi/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
struct cpuinfo_x86 *c = cpu_data + cpu;
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
init_intel_pdc(pr, c);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
...@@ -2027,7 +2027,7 @@ int __init io_apic_get_redir_entries (int ioapic)
}
-int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
	struct IO_APIC_route_entry entry;
	unsigned long flags;
...@@ -2049,8 +2049,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.dest_mode = INT_DEST_MODE;
	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-	entry.trigger = edge_level;
-	entry.polarity = active_high_low;
+	entry.trigger = triggering;
+	entry.polarity = polarity;
	entry.mask = 1;					 /* Disabled (masked) */
	irq = gsi_irq_sharing(irq);
...@@ -2065,9 +2065,9 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
	apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
		"IRQ %d Mode:%i Active:%i)\n", ioapic,
		mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-		edge_level, active_high_low);
+		triggering, polarity);
-	ioapic_register_intr(irq, entry.vector, edge_level);
+	ioapic_register_intr(irq, entry.vector, triggering);
	if (!ioapic && (irq < 16))
		disable_8259A_irq(irq);
......
...@@ -915,7 +915,7 @@ void __init mp_config_acpi_legacy_irqs (void)
#define MAX_GSI_NUM	4096
-int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
	int ioapic = -1;
	int ioapic_pin = 0;
...@@ -964,7 +964,7 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
-	if (edge_level) {
+	if (triggering == ACPI_LEVEL_SENSITIVE) {
		/*
		 * For PCI devices assign IRQs in order, avoiding gaps
		 * due to unused I/O APIC pins.
...@@ -986,8 +986,8 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
	}
	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-		edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
-		active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
	return gsi;
}
......
...@@ -267,7 +267,6 @@ config ACPI_DEBUG
config ACPI_EC
	bool
depends on X86
	default y
	help
	  This driver is required on some systems for the proper operation of
......
...@@ -71,8 +71,8 @@ static struct acpi_driver acpi_memory_device_driver = {
struct acpi_memory_device {
	acpi_handle handle;
	unsigned int state;	/* State of the memory device */
-	unsigned short cache_attribute;	/* memory cache attribute */
-	unsigned short read_write_attribute;	/* memory read/write attribute */
+	unsigned short caching;	/* memory cache attribute */
+	unsigned short write_protect;	/* memory read/write attribute */
	u64 start_addr;		/* Memory Range start physical addr */
	u64 end_addr;		/* Memory Range end physical addr */
};
...@@ -97,12 +97,12 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
	if (ACPI_SUCCESS(status)) {
		if (address64.resource_type == ACPI_MEMORY_RANGE) {
			/* Populate the structure */
-			mem_device->cache_attribute =
-				address64.attribute.memory.cache_attribute;
-			mem_device->read_write_attribute =
-				address64.attribute.memory.read_write_attribute;
-			mem_device->start_addr = address64.min_address_range;
-			mem_device->end_addr = address64.max_address_range;
+			mem_device->caching =
+				address64.info.mem.caching;
+			mem_device->write_protect =
+				address64.info.mem.write_protect;
+			mem_device->start_addr = address64.minimum;
+			mem_device->end_addr = address64.maximum;
		}
	}
...@@ -250,7 +250,6 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
	int result;
	u64 start = mem_device->start_addr;
	u64 len = mem_device->end_addr - start + 1;
unsigned long attr = mem_device->read_write_attribute;
	ACPI_FUNCTION_TRACE("acpi_memory_disable_device");
......
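The field renames in the hunks above (min_address_range/max_address_range to minimum/maximum, attribute.memory.* to info.mem.*) track the ACPICA resource-structure rework. A small sketch of how a caller might read the renamed fields out of an address-space resource; this is illustrative only (not part of this commit) and assumes the resource pointer comes from an acpi_walk_resources() callback:

#include <linux/kernel.h>
#include <linux/acpi.h>

/* Illustrative sketch only: decode the renamed address64 fields. */
static acpi_status example_decode(struct acpi_resource *resource, void *context)
{
	struct acpi_resource_address64 addr64;
	acpi_status status;

	status = acpi_resource_to_address64(resource, &addr64);
	if (ACPI_FAILURE(status))
		return AE_OK;	/* not an address resource, keep walking */

	if (addr64.resource_type == ACPI_MEMORY_RANGE)
		printk(KERN_DEBUG "mem 0x%llx-0x%llx caching=%u wp=%u\n",
		       (unsigned long long)addr64.minimum,
		       (unsigned long long)addr64.maximum,
		       addr64.info.mem.caching,
		       addr64.info.mem.write_protect);
	return AE_OK;
}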
...@@ -78,9 +78,9 @@ MODULE_LICENSE("GPL");
static uid_t asus_uid;
static gid_t asus_gid;
module_param(asus_uid, uint, 0);
-MODULE_PARM_DESC(uid, "UID for entries in /proc/acpi/asus.\n");
+MODULE_PARM_DESC(asus_uid, "UID for entries in /proc/acpi/asus.\n");
module_param(asus_gid, uint, 0);
-MODULE_PARM_DESC(gid, "GID for entries in /proc/acpi/asus.\n");
+MODULE_PARM_DESC(asus_gid, "GID for entries in /proc/acpi/asus.\n");
/* For each model, all features implemented,
 * those marked with R are relative to HOTK, A for absolute */
...@@ -302,7 +302,7 @@ static struct model_data model_conf[END_MODEL] = {
	 .brightness_set = "SPLV",
	 .brightness_get = "GPLV",
	 .display_set = "SDSP",
-	 .display_get = "\\SSTE"},
+	 .display_get = "\\_SB.PCI0.P0P1.VGA.GETD"},
	{
	 .name = "M6R",
	 .mt_mled = "MLED",
...@@ -851,6 +851,8 @@ static int __init asus_hotk_add_fs(struct acpi_device *device)
		mode = S_IFREG | S_IRUGO | S_IWUGO;
	} else {
		mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
printk(KERN_WARNING " asus_uid and asus_gid parameters are "
"deprecated, use chown and chmod instead!\n");
	}
	acpi_device_dir(device) = asus_proc_dir;
...@@ -987,9 +989,21 @@ static int __init asus_hotk_get_info(void)
	printk(KERN_NOTICE "  BSTS called, 0x%02x returned\n",
	       bsts_result);
-	/* Samsung P30 has a device with a valid _HID whose INIT does not
-	 * return anything. Catch this one and any similar here */
-	if (buffer.pointer == NULL) {
+	/* This is unlikely with implicit return */
+	if (buffer.pointer == NULL)
+		return -EINVAL;
+
+	model = (union acpi_object *) buffer.pointer;
+	/*
+	 * Samsung P30 has a device with a valid _HID whose INIT does not
+	 * return anything. It used to be possible to catch this exception,
+	 * but the implicit return code will now happily confuse the
+	 * driver. We assume that every ACPI_TYPE_STRING is a valid model
+	 * identifier but it's still possible to get completely bogus data.
+	 */
+	if (model->type == ACPI_TYPE_STRING) {
+		printk(KERN_NOTICE "  %s model detected, ", model->string.pointer);
+	} else {
		if (asus_info &&	/* Samsung P30 */
		    strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) {
			hotk->model = P30;
...@@ -1002,13 +1016,10 @@ static int __init asus_hotk_get_info(void)
			       "the developers with your DSDT\n");
		}
		hotk->methods = &model_conf[hotk->model];
-		return AE_OK;
-	}
-	model = (union acpi_object *)buffer.pointer;
-	if (model->type == ACPI_TYPE_STRING) {
-		printk(KERN_NOTICE "  %s model detected, ",
-		       model->string.pointer);
-	}
+		acpi_os_free(model);
+		return AE_OK;
+	}
	hotk->model = END_MODEL;
......
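The MODULE_PARM_DESC() fixes earlier in this file matter because the macro's first argument must name the same variable that was registered with module_param(); otherwise the description ends up attached to a parameter that does not exist. A minimal, hypothetical sketch of the correct pairing (illustrative only, not part of the driver):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Illustrative only: the identifier given to MODULE_PARM_DESC() matches
 * the one given to module_param(). */
static unsigned int example_uid;
module_param(example_uid, uint, 0);
MODULE_PARM_DESC(example_uid, "UID for entries in /proc/acpi/example");

MODULE_LICENSE("GPL");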
...@@ -5,7 +5,7 @@
 *****************************************************************************/
/*
- * Copyright (C) 2000 - 2005, R. Byron Moore
+ * Copyright (C) 2000 - 2006, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
...@@ -128,7 +128,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
				   ACPI_IMODE_LOAD_PASS1, flags, walk_state,
				   &(node));
		if (ACPI_FAILURE(status)) {
-			ACPI_REPORT_NSERROR(arg->common.value.string, status);
+			ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
			return_ACPI_STATUS(status);
		}
	}
...@@ -232,7 +232,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
			    + (acpi_integer) arg->common.value.size;
			if (position > ACPI_UINT32_MAX) {
-				ACPI_REPORT_ERROR(("Bit offset within field too large (> 0xFFFFFFFF)\n"));
+				ACPI_ERROR((AE_INFO,
+					    "Bit offset within field too large (> 0xFFFFFFFF)"));
				return_ACPI_STATUS(AE_SUPPORT);
			}
...@@ -268,7 +269,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
						  ACPI_NS_DONT_OPEN_SCOPE,
						  walk_state, &info->field_node);
			if (ACPI_FAILURE(status)) {
-				ACPI_REPORT_NSERROR((char *)&arg->named.name,
-						    status);
+				ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+						     status);
				if (status != AE_ALREADY_EXISTS) {
					return_ACPI_STATUS(status);
...@@ -293,7 +294,11 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
			    + (acpi_integer) arg->common.value.size;
			if (position > ACPI_UINT32_MAX) {
-				ACPI_REPORT_ERROR(("Field [%4.4s] bit offset too large (> 0xFFFFFFFF)\n", (char *)&info->field_node->name));
+				ACPI_ERROR((AE_INFO,
+					    "Field [%4.4s] bit offset too large (> 0xFFFFFFFF)",
+					    ACPI_CAST_PTR(char,
+							  &info->field_node->
+							  name)));
				return_ACPI_STATUS(AE_SUPPORT);
			}
...@@ -302,8 +307,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
		default:
-			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-					  "Invalid opcode in field list: %X\n",
-					  arg->common.aml_opcode));
+			ACPI_ERROR((AE_INFO,
+				    "Invalid opcode in field list: %X",
+				    arg->common.aml_opcode));
			return_ACPI_STATUS(AE_AML_BAD_OPCODE);
		}
...@@ -349,7 +354,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
				   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
				   walk_state, &region_node);
		if (ACPI_FAILURE(status)) {
-			ACPI_REPORT_NSERROR(arg->common.value.name, status);
+			ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
			return_ACPI_STATUS(status);
		}
	}
...@@ -431,7 +436,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
					  ACPI_NS_ERROR_IF_FOUND,
					  walk_state, &node);
		if (ACPI_FAILURE(status)) {
-			ACPI_REPORT_NSERROR((char *)&arg->named.name,
-					    status);
+			ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+					     status);
			if (status != AE_ALREADY_EXISTS) {
				return_ACPI_STATUS(status);
...@@ -488,7 +493,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
				   ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
				   walk_state, &region_node);
		if (ACPI_FAILURE(status)) {
-			ACPI_REPORT_NSERROR(arg->common.value.name, status);
+			ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
			return_ACPI_STATUS(status);
		}
	}
...@@ -502,7 +507,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
				ACPI_NS_SEARCH_PARENT, walk_state,
				&info.register_node);
	if (ACPI_FAILURE(status)) {
-		ACPI_REPORT_NSERROR(arg->common.value.string, status);
+		ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
		return_ACPI_STATUS(status);
	}
...@@ -560,7 +565,7 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
				ACPI_NS_SEARCH_PARENT, walk_state,
				&info.register_node);
	if (ACPI_FAILURE(status)) {
-		ACPI_REPORT_NSERROR(arg->common.value.string, status);
+		ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
		return_ACPI_STATUS(status);
	}
...@@ -573,7 +578,7 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
				ACPI_NS_SEARCH_PARENT, walk_state,
				&info.data_register_node);
	if (ACPI_FAILURE(status)) {
-		ACPI_REPORT_NSERROR(arg->common.value.string, status);
+		ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
		return_ACPI_STATUS(status);
	}
......
...@@ -5,7 +5,7 @@
 ******************************************************************************/
/*
- * Copyright (C) 2000 - 2005, R. Byron Moore
+ * Copyright (C) 2000 - 2006, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
...@@ -84,7 +84,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
	acpi_object_type type;
	acpi_status status;
-	ACPI_FUNCTION_NAME("ds_init_one_object");
+	ACPI_FUNCTION_ENTRY();
	/*
	 * We are only interested in NS nodes owned by the table that
...@@ -105,11 +105,10 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
		status = acpi_ds_initialize_region(obj_handle);
		if (ACPI_FAILURE(status)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-					  "Region %p [%4.4s] - Init failure, %s\n",
-					  obj_handle,
-					  acpi_ut_get_node_name(obj_handle),
-					  acpi_format_exception(status)));
+			ACPI_EXCEPTION((AE_INFO, status,
+					"During Region initialization %p [%4.4s]",
+					obj_handle,
+					acpi_ut_get_node_name(obj_handle)));
		}
		info->op_region_count++;
...@@ -117,14 +116,6 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
	case ACPI_TYPE_METHOD:
/*
* Print a dot for each method unless we are going to print
* the entire pathname
*/
if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
}
		/*
		 * Set the execution data width (32 or 64) based upon the
		 * revision number of the parent ACPI table.
...@@ -134,6 +125,21 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
		if (info->table_desc->pointer->revision == 1) {
			node->flags |= ANOBJ_DATA_WIDTH_32;
		}
#ifdef ACPI_INIT_PARSE_METHODS
/*
* Note 11/2005: Removed this code to parse all methods during table
* load because it causes problems if there are any errors during the
* parse. Also, it seems like overkill and we probably don't want to
* abort a table load because of an issue with a single method.
*/
/*
* Print a dot for each method unless we are going to print
* the entire pathname
*/
if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
}
		/*
		 * Always parse methods to detect errors, we will delete
...@@ -141,15 +147,15 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
		 */
		status = acpi_ds_parse_method(obj_handle);
		if (ACPI_FAILURE(status)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-					  "\n+Method %p [%4.4s] - parse failure, %s\n",
-					  obj_handle,
-					  acpi_ut_get_node_name(obj_handle),
-					  acpi_format_exception(status)));
+			ACPI_ERROR((AE_INFO,
+				    "Method %p [%4.4s] - parse failure, %s",
+				    obj_handle,
+				    acpi_ut_get_node_name(obj_handle),
+				    acpi_format_exception(status)));
			/* This parse failed, but we will continue parsing more methods */
		}
#endif
		info->method_count++;
		break;
...@@ -207,8 +213,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
	status = acpi_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
				     acpi_ds_init_one_object, &info, NULL);
	if (ACPI_FAILURE(status)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "walk_namespace failed, %s\n",
-				  acpi_format_exception(status)));
+		ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
	}
	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
......
...@@ -5,7 +5,7 @@
 *****************************************************************************/
/*
- * Copyright (C) 2000 - 2005, R. Byron Moore
+ * Copyright (C) 2000 - 2006, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
...@@ -47,135 +47,66 @@
#include <acpi/acdispat.h>
#include <acpi/acinterp.h>
#include <acpi/acnamesp.h>
#include <acpi/acdisasm.h>
#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")
/*******************************************************************************
 *
- * FUNCTION:    acpi_ds_parse_method
+ * FUNCTION:    acpi_ds_method_error
 *
- * PARAMETERS:  Node            - Method node
+ * PARAMETERS:  Status          - Execution status
+ *              walk_state      - Current state
 *
 * RETURN:      Status
 *
- * DESCRIPTION: Parse the AML that is associated with the method.
+ * DESCRIPTION: Called on method error. Invoke the global exception handler if
+ *              present, dump the method data if the disassembler is configured
 *
- * MUTEX:       Assumes parser is locked
+ * Note: Allows the exception handler to change the status code
 *
 ******************************************************************************/
acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node) acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{ {
acpi_status status; ACPI_FUNCTION_ENTRY();
union acpi_operand_object *obj_desc;
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node); /* Ignore AE_OK and control exception codes */
/* Parameter Validation */ if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
return (status);
if (!node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
} }
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, /* Invoke the global exception handler */
"**** Parsing [%4.4s] **** named_obj=%p\n",
acpi_ut_get_node_name(node), node));
/* Extract the method object from the method Node */ if (acpi_gbl_exception_handler) {
/* Exit the interpreter, allow handler to execute methods */
obj_desc = acpi_ns_get_attached_object(node); acpi_ex_exit_interpreter();
if (!obj_desc) {
return_ACPI_STATUS(AE_NULL_OBJECT);
}
/* Create a mutex for the method if there is a concurrency limit */
if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) &&
(!obj_desc->method.semaphore)) {
status = acpi_os_create_semaphore(obj_desc->method.concurrency,
obj_desc->method.concurrency,
&obj_desc->method.semaphore);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/* /*
* Allocate a new parser op to be the root of the parsed * Handler can map the exception code to anything it wants, including
* method tree * AE_OK, in which case the executing method will not be aborted.
*/
op = acpi_ps_alloc_op(AML_METHOD_OP);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Init new op with the method name and pointer back to the Node */
acpi_ps_set_name(op, node->name.integer);
op->common.node = node;
/*
* Get a new owner_id for objects created by this method. Namespace
* objects (such as Operation Regions) can be created during the
* first pass parse.
*/ */
status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id); status = acpi_gbl_exception_handler(status,
if (ACPI_FAILURE(status)) { walk_state->method_node ?
goto cleanup; walk_state->method_node->
} name.integer : 0,
walk_state->opcode,
/* Create and initialize a new walk state */ walk_state->aml_offset,
walk_state =
acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL,
NULL); NULL);
if (!walk_state) { (void)acpi_ex_enter_interpreter();
status = AE_NO_MEMORY;
goto cleanup2;
} }
#ifdef ACPI_DISASSEMBLER
status = acpi_ds_init_aml_walk(walk_state, op, node,
obj_desc->method.aml_start,
obj_desc->method.aml_length, NULL, 1);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state); /* Display method locals/args if disassembler is present */
goto cleanup2;
}
/* acpi_dm_dump_method_info(status, walk_state, walk_state->op);
* Parse the method, first pass
*
* The first pass load is where newly declared named objects are added into
* the namespace. Actual evaluation of the named objects (what would be
* called a "second pass") happens during the actual execution of the
* method so that operands to the named objects can take on dynamic
* run-time values.
*/
status = acpi_ps_parse_aml(walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup2;
} }
#endif
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, return (status);
"**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
acpi_ut_get_node_name(node), node, op));
/*
* Delete the parse tree. We simply re-parse the method for every
* execution since there isn't much overhead (compared to keeping lots
* of parse trees around)
*/
acpi_ns_delete_namespace_subtree(node);
acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id);
cleanup2:
acpi_ut_release_owner_id(&obj_desc->method.owner_id);
cleanup:
acpi_ps_delete_parse_tree(op);
return_ACPI_STATUS(status);
} }
/******************************************************************************* /*******************************************************************************
...@@ -195,9 +126,9 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
 ******************************************************************************/
acpi_status
-acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
-			       union acpi_operand_object *obj_desc,
-			       struct acpi_namespace_node *calling_method_node)
+acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
+			       union acpi_operand_object * obj_desc,
+			       struct acpi_namespace_node * calling_method_node)
{
	acpi_status status = AE_OK;
...@@ -210,7 +141,8 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
	/* Prevent wraparound of thread count */
	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
-		ACPI_REPORT_ERROR(("Method reached maximum reentrancy limit (255)\n"));
+		ACPI_ERROR((AE_INFO,
+			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}
...@@ -539,22 +471,61 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
		acpi_os_signal_semaphore(walk_state->method_desc->method.
					 semaphore, 1);
		if (ACPI_FAILURE(status)) {
-			ACPI_REPORT_ERROR(("Could not signal method semaphore\n"));
+			ACPI_ERROR((AE_INFO,
+				    "Could not signal method semaphore"));
			/* Ignore error and continue cleanup */
		}
	}
/*
* There are no more threads executing this method. Perform
* additional cleanup.
*
* The method Node is stored in the walk state
*/
method_node = walk_state->method_node;
/* Lock namespace for possible update */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto exit;
}
/*
* Delete any namespace entries created immediately underneath
* the method
*/
if (method_node->child) {
acpi_ns_delete_namespace_subtree(method_node);
}
/*
* Delete any namespace entries created anywhere else within
* the namespace by the execution of this method
*/
acpi_ns_delete_namespace_by_owner(walk_state->method_desc->method.
owner_id);
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
/* Are there any other threads currently executing this method? */
	if (walk_state->method_desc->method.thread_count) {
/*
* Additional threads. Do not release the owner_id in this case,
* we immediately reuse it for the next thread executing this method
*/
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-				  "*** Not deleting method namespace, there are still %d threads\n",
+				  "*** Completed execution of one thread, %d threads remaining\n",
				  walk_state->method_desc->method.
				  thread_count));
-	} else {		/* This is the last executing thread */
+	} else {
/* This is the only executing thread for this method */
		/*
		 * Support to dynamically change a method from not_serialized to
-		 * Serialized if it appears that the method is written foolishly and
+		 * Serialized if it appears that the method is incorrectly written and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception
...@@ -570,39 +541,150 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
					    semaphore);
		}
/* No more threads, we can free the owner_id */
acpi_ut_release_owner_id(&walk_state->method_desc->method.
owner_id);
}
exit:
(void)acpi_ut_release_mutex(ACPI_MTX_PARSER);
return_VOID;
}
#ifdef ACPI_INIT_PARSE_METHODS
/* /*
* There are no more threads executing this method. Perform * Note 11/2005: Removed this code to parse all methods during table
* additional cleanup. * load because it causes problems if there are any errors during the
* parse. Also, it seems like overkill and we probably don't want to
* abort a table load because of an issue with a single method.
*/
/*******************************************************************************
* *
* The method Node is stored in the walk state * FUNCTION: acpi_ds_parse_method
*
* PARAMETERS: Node - Method node
*
* RETURN: Status
*
* DESCRIPTION: Parse the AML that is associated with the method.
*
* MUTEX: Assumes parser is locked
*
******************************************************************************/
acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
{
acpi_status status;
union acpi_operand_object *obj_desc;
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node);
/* Parameter Validation */
if (!node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Parsing [%4.4s] **** named_obj=%p\n",
acpi_ut_get_node_name(node), node));
/* Extract the method object from the method Node */
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
return_ACPI_STATUS(AE_NULL_OBJECT);
}
/* Create a mutex for the method if there is a concurrency limit */
if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) &&
(!obj_desc->method.semaphore)) {
status = acpi_os_create_semaphore(obj_desc->method.concurrency,
obj_desc->method.concurrency,
&obj_desc->method.semaphore);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/*
* Allocate a new parser op to be the root of the parsed
* method tree
*/ */
method_node = walk_state->method_node; op = acpi_ps_alloc_op(AML_METHOD_OP);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Init new op with the method name and pointer back to the Node */
acpi_ps_set_name(op, node->name.integer);
op->common.node = node;
/* /*
* Delete any namespace entries created immediately underneath * Get a new owner_id for objects created by this method. Namespace
* the method * objects (such as Operation Regions) can be created during the
* first pass parse.
*/ */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
goto exit; goto cleanup;
} }
if (method_node->child) { /* Create and initialize a new walk state */
acpi_ns_delete_namespace_subtree(method_node);
walk_state =
acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL,
NULL);
if (!walk_state) {
status = AE_NO_MEMORY;
goto cleanup2;
}
status = acpi_ds_init_aml_walk(walk_state, op, node,
obj_desc->method.aml_start,
obj_desc->method.aml_length, NULL, 1);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
goto cleanup2;
} }
/* /*
* Delete any namespace entries created anywhere else within * Parse the method, first pass
* the namespace *
* The first pass load is where newly declared named objects are added into
* the namespace. Actual evaluation of the named objects (what would be
* called a "second pass") happens during the actual execution of the
* method so that operands to the named objects can take on dynamic
* run-time values.
*/ */
acpi_ns_delete_namespace_by_owner(walk_state->method_desc-> status = acpi_ps_parse_aml(walk_state);
method.owner_id); if (ACPI_FAILURE(status)) {
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); goto cleanup2;
acpi_ut_release_owner_id(&walk_state->method_desc->method.
owner_id);
} }
exit: ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
(void)acpi_ut_release_mutex(ACPI_MTX_PARSER); "**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
return_VOID; acpi_ut_get_node_name(node), node, op));
/*
* Delete the parse tree. We simply re-parse the method for every
* execution since there isn't much overhead (compared to keeping lots
* of parse trees around)
*/
acpi_ns_delete_namespace_subtree(node);
acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id);
cleanup2:
acpi_ut_release_owner_id(&obj_desc->method.owner_id);
cleanup:
acpi_ps_delete_parse_tree(op);
return_ACPI_STATUS(status);
} }
#endif
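Both the cleanup path kept above and the removed acpi_ds_parse_method rely on owner-id bookkeeping: every namespace node created on behalf of a method is tagged with the method's owner id, so the whole set can be undone in one sweep (acpi_ns_delete_namespace_by_owner). A rough sketch of that pattern, using an invented flat list instead of the real namespace tree:

#include <stdlib.h>

struct ns_node {
    unsigned short   owner_id;          /* who created this node */
    struct ns_node  *next;              /* flat list instead of a real tree */
};

struct ns_list {
    struct ns_node *head;
};

static void ns_delete_by_owner(struct ns_list *ns, unsigned short owner_id)
{
    struct ns_node **link = &ns->head;

    while (*link) {
        struct ns_node *node = *link;

        if (node->owner_id == owner_id) {
            *link = node->next;         /* unlink and free nodes this owner created */
            free(node);
        } else {
            link = &node->next;         /* keep nodes owned by someone else */
        }
    }
}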
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -260,8 +260,8 @@ acpi_ds_method_data_get_node(u16 opcode, ...@@ -260,8 +260,8 @@ acpi_ds_method_data_get_node(u16 opcode,
case AML_LOCAL_OP: case AML_LOCAL_OP:
if (index > ACPI_METHOD_MAX_LOCAL) { if (index > ACPI_METHOD_MAX_LOCAL) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Local index %d is invalid (max %d)\n", "Local index %d is invalid (max %d)",
index, ACPI_METHOD_MAX_LOCAL)); index, ACPI_METHOD_MAX_LOCAL));
return_ACPI_STATUS(AE_AML_INVALID_INDEX); return_ACPI_STATUS(AE_AML_INVALID_INDEX);
} }
...@@ -274,8 +274,8 @@ acpi_ds_method_data_get_node(u16 opcode, ...@@ -274,8 +274,8 @@ acpi_ds_method_data_get_node(u16 opcode,
case AML_ARG_OP: case AML_ARG_OP:
if (index > ACPI_METHOD_MAX_ARG) { if (index > ACPI_METHOD_MAX_ARG) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Arg index %d is invalid (max %d)\n", "Arg index %d is invalid (max %d)",
index, ACPI_METHOD_MAX_ARG)); index, ACPI_METHOD_MAX_ARG));
return_ACPI_STATUS(AE_AML_INVALID_INDEX); return_ACPI_STATUS(AE_AML_INVALID_INDEX);
} }
...@@ -286,8 +286,7 @@ acpi_ds_method_data_get_node(u16 opcode, ...@@ -286,8 +286,7 @@ acpi_ds_method_data_get_node(u16 opcode,
break; break;
default: default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Opcode %d is invalid\n", ACPI_ERROR((AE_INFO, "Opcode %d is invalid", opcode));
opcode));
return_ACPI_STATUS(AE_AML_BAD_OPCODE); return_ACPI_STATUS(AE_AML_BAD_OPCODE);
} }
...@@ -378,8 +377,7 @@ acpi_ds_method_data_get_value(u16 opcode, ...@@ -378,8 +377,7 @@ acpi_ds_method_data_get_value(u16 opcode,
/* Validate the object descriptor */ /* Validate the object descriptor */
if (!dest_desc) { if (!dest_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "Null object descriptor pointer"));
"Null object descriptor pointer\n"));
return_ACPI_STATUS(AE_BAD_PARAMETER); return_ACPI_STATUS(AE_BAD_PARAMETER);
} }
...@@ -424,22 +422,23 @@ acpi_ds_method_data_get_value(u16 opcode, ...@@ -424,22 +422,23 @@ acpi_ds_method_data_get_value(u16 opcode,
switch (opcode) { switch (opcode) {
case AML_ARG_OP: case AML_ARG_OP:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Uninitialized Arg[%d] at node %p\n", "Uninitialized Arg[%d] at node %p",
index, node)); index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG); return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
case AML_LOCAL_OP: case AML_LOCAL_OP:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Uninitialized Local[%d] at node %p\n", "Uninitialized Local[%d] at node %p",
index, node)); index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL); return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL);
default: default:
ACPI_REPORT_ERROR(("Not Arg/Local opcode: %X\n", ACPI_ERROR((AE_INFO,
"Not a Arg/Local opcode: %X",
opcode)); opcode));
return_ACPI_STATUS(AE_AML_INTERNAL); return_ACPI_STATUS(AE_AML_INTERNAL);
} }
......
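The dsmthdat.c hunks above validate Local and Arg indices before touching the per-method storage. A self-contained sketch of the same bounds checking; the limits match ACPI's Local0-Local7 and Arg0-Arg6, but the types and opcode values are invented for illustration:

#include <stdio.h>

#define MAX_LOCAL 7                     /* Local0..Local7 */
#define MAX_ARG   6                     /* Arg0..Arg6     */

enum { OP_LOCAL, OP_ARG };

struct method_frame {
    void *locals[MAX_LOCAL + 1];
    void *args[MAX_ARG + 1];
};

static void **get_slot(struct method_frame *f, int opcode, unsigned int index)
{
    switch (opcode) {
    case OP_LOCAL:
        if (index > MAX_LOCAL) {
            fprintf(stderr, "Local index %u is invalid (max %d)\n",
                    index, MAX_LOCAL);
            return NULL;
        }
        return &f->locals[index];

    case OP_ARG:
        if (index > MAX_ARG) {
            fprintf(stderr, "Arg index %u is invalid (max %d)\n",
                    index, MAX_ARG);
            return NULL;
        }
        return &f->args[index];

    default:
        fprintf(stderr, "Opcode %d is invalid\n", opcode);
        return NULL;
    }
}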
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -51,6 +51,7 @@ ...@@ -51,6 +51,7 @@
#define _COMPONENT ACPI_DISPATCHER #define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsobject") ACPI_MODULE_NAME("dsobject")
/* Local prototypes */
static acpi_status static acpi_status
acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_parse_object *op, union acpi_parse_object *op,
...@@ -85,7 +86,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, ...@@ -85,7 +86,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
*obj_desc_ptr = NULL; *obj_desc_ptr = NULL;
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) { if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
/* /*
* This is an named object reference. If this name was * This is a named object reference. If this name was
* previously looked up in the namespace, it was stored in this op. * previously looked up in the namespace, it was stored in this op.
* Otherwise, go ahead and look it up now * Otherwise, go ahead and look it up now
*/ */
...@@ -96,18 +97,48 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, ...@@ -96,18 +97,48 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
ACPI_IMODE_EXECUTE, ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT | ACPI_NS_SEARCH_PARENT |
ACPI_NS_DONT_OPEN_SCOPE, NULL, ACPI_NS_DONT_OPEN_SCOPE, NULL,
(struct acpi_namespace_node **) ACPI_CAST_INDIRECT_PTR(struct
&(op->common.node)); acpi_namespace_node,
&(op->
common.
node)));
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(op->common.value.string, /* Check if we are resolving a named reference within a package */
status);
if ((status == AE_NOT_FOUND)
&& (acpi_gbl_enable_interpreter_slack)
&&
((op->common.parent->common.aml_opcode ==
AML_PACKAGE_OP)
|| (op->common.parent->common.aml_opcode ==
AML_VAR_PACKAGE_OP))) {
/*
* We didn't find the target and we are populating elements
* of a package - ignore if slack enabled. Some ASL code
* contains dangling invalid references in packages and
* expects that no exception will be issued. Leave the
* element as a null element. It cannot be used, but it
* can be overwritten by subsequent ASL code - this is
* typically the case.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Ignoring unresolved reference in package [%4.4s]\n",
walk_state->
scope_info->scope.
node->name.ascii));
return_ACPI_STATUS(AE_OK);
} else {
ACPI_ERROR_NAMESPACE(op->common.value.
string, status);
}
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
} }
} }
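The block above adds a slack-mode escape hatch: a name that cannot be resolved while populating a Package element is left as a null element instead of aborting, since some ASL relies on dangling package references being tolerated. A small sketch of that decision, with placeholder status values and invented names:

#include <stdbool.h>
#include <stddef.h>

#define AE_OK        0
#define AE_NOT_FOUND 5                  /* placeholder status value */

static bool interpreter_slack = true;   /* stand-in for acpi_gbl_enable_interpreter_slack */

static int resolve_package_element(int lookup_status, bool inside_package,
                                   void **element_out)
{
    if (lookup_status == AE_NOT_FOUND && interpreter_slack && inside_package) {
        /*
         * Dangling reference in a Package: leave a null element that later
         * ASL may overwrite, and report success instead of failing the load.
         */
        *element_out = NULL;
        return AE_OK;
    }

    return lookup_status;               /* anything else is a real error */
}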
/* Create and init the internal ACPI object */ /* Create and init a new internal ACPI object */
obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
(op->common.aml_opcode))-> (op->common.aml_opcode))->
...@@ -157,13 +188,13 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state, ...@@ -157,13 +188,13 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE("ds_build_internal_buffer_obj"); ACPI_FUNCTION_TRACE("ds_build_internal_buffer_obj");
obj_desc = *obj_desc_ptr;
if (obj_desc) {
/* /*
* We are evaluating a Named buffer object "Name (xxxx, Buffer)". * If we are evaluating a Named buffer object "Name (xxxx, Buffer)".
* The buffer object already exists (from the NS node) * The buffer object already exists (from the NS node), otherwise it must
* be created.
*/ */
} else { obj_desc = *obj_desc_ptr;
if (!obj_desc) {
/* Create a new buffer object */ /* Create a new buffer object */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER); obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
...@@ -183,10 +214,9 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state, ...@@ -183,10 +214,9 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
byte_list = arg->named.next; byte_list = arg->named.next;
if (byte_list) { if (byte_list) {
if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) { if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Expecting bytelist, got AML opcode %X in op %p\n", "Expecting bytelist, got AML opcode %X in op %p",
byte_list->common.aml_opcode, byte_list->common.aml_opcode, byte_list));
byte_list));
acpi_ut_remove_reference(obj_desc); acpi_ut_remove_reference(obj_desc);
return (AE_TYPE); return (AE_TYPE);
...@@ -259,7 +289,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, ...@@ -259,7 +289,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc = NULL; union acpi_operand_object *obj_desc = NULL;
u32 package_list_length; u32 package_list_length;
acpi_status status = AE_OK; acpi_status status = AE_OK;
u32 i; acpi_native_uint i;
ACPI_FUNCTION_TRACE("ds_build_internal_package_obj"); ACPI_FUNCTION_TRACE("ds_build_internal_package_obj");
...@@ -271,13 +301,12 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, ...@@ -271,13 +301,12 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
parent = parent->common.parent; parent = parent->common.parent;
} }
obj_desc = *obj_desc_ptr;
if (obj_desc) {
/* /*
* We are evaluating a Named package object "Name (xxxx, Package)". * If we are evaluating a Named package object "Name (xxxx, Package)",
* Get the existing package object from the NS node * the package object already exists, otherwise it must be created.
*/ */
} else { obj_desc = *obj_desc_ptr;
if (!obj_desc) {
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE); obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
*obj_desc_ptr = obj_desc; *obj_desc_ptr = obj_desc;
if (!obj_desc) { if (!obj_desc) {
...@@ -291,11 +320,9 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, ...@@ -291,11 +320,9 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
/* Count the number of items in the package list */ /* Count the number of items in the package list */
package_list_length = 0;
arg = op->common.value.arg; arg = op->common.value.arg;
arg = arg->common.next; arg = arg->common.next;
while (arg) { for (package_list_length = 0; arg; package_list_length++) {
package_list_length++;
arg = arg->common.next; arg = arg->common.next;
} }
...@@ -322,12 +349,11 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, ...@@ -322,12 +349,11 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
} }
/* /*
* Now init the elements of the package * Initialize all elements of the package
*/ */
i = 0;
arg = op->common.value.arg; arg = op->common.value.arg;
arg = arg->common.next; arg = arg->common.next;
while (arg) { for (i = 0; arg; i++) {
if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
/* Object (package or buffer) is already built */ /* Object (package or buffer) is already built */
...@@ -340,8 +366,6 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, ...@@ -340,8 +366,6 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
package. package.
elements[i]); elements[i]);
} }
i++;
arg = arg->common.next; arg = arg->common.next;
} }
...@@ -518,8 +542,8 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state, ...@@ -518,8 +542,8 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default: default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Unknown constant opcode %X\n", "Unknown constant opcode %X",
opcode)); opcode));
status = AE_AML_OPERAND_TYPE; status = AE_AML_OPERAND_TYPE;
break; break;
...@@ -535,8 +559,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state, ...@@ -535,8 +559,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
break; break;
default: default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "Unknown Integer type %X",
"Unknown Integer type %X\n",
op_info->type)); op_info->type));
status = AE_AML_OPERAND_TYPE; status = AE_AML_OPERAND_TYPE;
break; break;
...@@ -615,8 +638,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state, ...@@ -615,8 +638,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default: default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "Unimplemented data type: %X",
"Unimplemented data type: %X\n",
ACPI_GET_OBJECT_TYPE(obj_desc))); ACPI_GET_OBJECT_TYPE(obj_desc)));
status = AE_AML_OPERAND_TYPE; status = AE_AML_OPERAND_TYPE;
......
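Much of the rest of this commit replaces ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ...)) calls with ACPI_ERROR((AE_INFO, ...)), dropping the trailing \n from each message. A plausible sketch of how such a macro pair can be layered is shown below; these definitions are invented for illustration and are not the actual ACPICA macros:

#include <stdarg.h>
#include <stdio.h>

static void error_report(const char *module, int line, const char *fmt, ...)
{
    va_list ap;

    fprintf(stderr, "ACPI Error (%s-%04d): ", module, line);
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fputc('\n', stderr);                /* newline appended centrally, not per message */
}

/* Mock definitions: AE_INFO supplies the module/line pair, ACPI_ERROR forwards it. */
#define AE_INFO            __FILE__, __LINE__
#define ACPI_ERROR(plist)  error_report plist

/* Example use, matching the double-parenthesis call style in the diff:
 *     ACPI_ERROR((AE_INFO, "Unknown opcode %X", 0x5B));
 */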
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -245,7 +245,9 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc) ...@@ -245,7 +245,9 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->buffer.node; node = obj_desc->buffer.node;
if (!node) { if (!node) {
ACPI_REPORT_ERROR(("No pointer back to NS node in buffer obj %p\n", obj_desc)); ACPI_ERROR((AE_INFO,
"No pointer back to NS node in buffer obj %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL); return_ACPI_STATUS(AE_AML_INTERNAL);
} }
...@@ -287,7 +289,8 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc) ...@@ -287,7 +289,8 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->package.node; node = obj_desc->package.node;
if (!node) { if (!node) {
ACPI_REPORT_ERROR(("No pointer back to NS node in package %p\n", ACPI_ERROR((AE_INFO,
"No pointer back to NS node in package %p",
obj_desc)); obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL); return_ACPI_STATUS(AE_AML_INTERNAL);
} }
...@@ -413,8 +416,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode, ...@@ -413,8 +416,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Host object must be a Buffer */ /* Host object must be a Buffer */
if (ACPI_GET_OBJECT_TYPE(buffer_desc) != ACPI_TYPE_BUFFER) { if (ACPI_GET_OBJECT_TYPE(buffer_desc) != ACPI_TYPE_BUFFER) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Target of Create Field is not a Buffer object - %s\n", "Target of Create Field is not a Buffer object - %s",
acpi_ut_get_object_type_name(buffer_desc))); acpi_ut_get_object_type_name(buffer_desc)));
status = AE_AML_OPERAND_TYPE; status = AE_AML_OPERAND_TYPE;
...@@ -427,8 +430,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode, ...@@ -427,8 +430,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
* after resolution in acpi_ex_resolve_operands(). * after resolution in acpi_ex_resolve_operands().
*/ */
if (ACPI_GET_DESCRIPTOR_TYPE(result_desc) != ACPI_DESC_TYPE_NAMED) { if (ACPI_GET_DESCRIPTOR_TYPE(result_desc) != ACPI_DESC_TYPE_NAMED) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"(%s) destination not a NS Node [%s]\n", "(%s) destination not a NS Node [%s]",
acpi_ps_get_opcode_name(aml_opcode), acpi_ps_get_opcode_name(aml_opcode),
acpi_ut_get_descriptor_name(result_desc))); acpi_ut_get_descriptor_name(result_desc)));
...@@ -453,8 +456,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode, ...@@ -453,8 +456,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Must have a valid (>0) bit count */ /* Must have a valid (>0) bit count */
if (bit_count == 0) { if (bit_count == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Attempt to create_field of length 0\n")); "Attempt to create_field of length zero"));
status = AE_AML_OPERAND_VALUE; status = AE_AML_OPERAND_VALUE;
goto cleanup; goto cleanup;
} }
...@@ -507,9 +510,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode, ...@@ -507,9 +510,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
default: default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Unknown field creation opcode %02x\n", "Unknown field creation opcode %02x", aml_opcode));
aml_opcode));
status = AE_AML_BAD_OPCODE; status = AE_AML_BAD_OPCODE;
goto cleanup; goto cleanup;
} }
...@@ -517,12 +519,11 @@ acpi_ds_init_buffer_field(u16 aml_opcode, ...@@ -517,12 +519,11 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Entire field must fit within the current length of the buffer */ /* Entire field must fit within the current length of the buffer */
if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) { if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Field [%4.4s] size %d exceeds Buffer [%4.4s] size %d (bits)\n", "Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)",
acpi_ut_get_node_name(result_desc), acpi_ut_get_node_name(result_desc),
bit_offset + bit_count, bit_offset + bit_count,
acpi_ut_get_node_name(buffer_desc->buffer. acpi_ut_get_node_name(buffer_desc->buffer.node),
node),
8 * (u32) buffer_desc->buffer.length)); 8 * (u32) buffer_desc->buffer.length));
status = AE_AML_BUFFER_LIMIT; status = AE_AML_BUFFER_LIMIT;
goto cleanup; goto cleanup;
...@@ -629,9 +630,9 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state, ...@@ -629,9 +630,9 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
"after acpi_ex_resolve_operands"); "after acpi_ex_resolve_operands");
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "(%s) bad operand(s) (%X)\n", ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
acpi_ps_get_opcode_name(op->common. acpi_ps_get_opcode_name(op->common.aml_opcode),
aml_opcode), status)); status));
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -1155,8 +1156,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state, ...@@ -1155,8 +1156,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
default: default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p",
"Unknown control opcode=%X Op=%p\n",
op->common.aml_opcode, op)); op->common.aml_opcode, op));
status = AE_AML_BAD_OPCODE; status = AE_AML_BAD_OPCODE;
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -176,8 +176,8 @@ acpi_ds_is_result_used(union acpi_parse_object * op, ...@@ -176,8 +176,8 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
/* Must have both an Op and a Result Object */ /* Must have both an Op and a Result Object */
if (!op) { if (!op) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Null Op\n")); ACPI_ERROR((AE_INFO, "Null Op"));
return_VALUE(TRUE); return_UINT8(TRUE);
} }
/* /*
...@@ -208,7 +208,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, ...@@ -208,7 +208,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
"At Method level, result of [%s] not used\n", "At Method level, result of [%s] not used\n",
acpi_ps_get_opcode_name(op->common. acpi_ps_get_opcode_name(op->common.
aml_opcode))); aml_opcode)));
return_VALUE(FALSE); return_UINT8(FALSE);
} }
/* Get info on the parent. The root_op is AML_SCOPE */ /* Get info on the parent. The root_op is AML_SCOPE */
...@@ -216,9 +216,8 @@ acpi_ds_is_result_used(union acpi_parse_object * op, ...@@ -216,9 +216,8 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
parent_info = parent_info =
acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode); acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
if (parent_info->class == AML_CLASS_UNKNOWN) { if (parent_info->class == AML_CLASS_UNKNOWN) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
"Unknown parent opcode. Op=%p\n", op)); return_UINT8(FALSE);
return_VALUE(FALSE);
} }
/* /*
...@@ -304,7 +303,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, ...@@ -304,7 +303,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common. acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op)); aml_opcode), op));
return_VALUE(TRUE); return_UINT8(TRUE);
result_not_used: result_not_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
...@@ -313,7 +312,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, ...@@ -313,7 +312,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common. acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op)); aml_opcode), op));
return_VALUE(FALSE); return_UINT8(FALSE);
} }
/******************************************************************************* /*******************************************************************************
...@@ -344,7 +343,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op, ...@@ -344,7 +343,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
ACPI_FUNCTION_TRACE_PTR("ds_delete_result_if_not_used", result_obj); ACPI_FUNCTION_TRACE_PTR("ds_delete_result_if_not_used", result_obj);
if (!op) { if (!op) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Null Op\n")); ACPI_ERROR((AE_INFO, "Null Op"));
return_VOID; return_VOID;
} }
...@@ -567,7 +566,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, ...@@ -567,7 +566,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
} }
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(name_string, status); ACPI_ERROR_NAMESPACE(name_string, status);
} }
} }
...@@ -616,7 +615,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, ...@@ -616,7 +615,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
if (op_info->flags & AML_HAS_RETVAL) { if (op_info->flags & AML_HAS_RETVAL) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Argument previously created, already stacked \n")); "Argument previously created, already stacked\n"));
ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
(walk_state-> (walk_state->
...@@ -635,10 +634,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, ...@@ -635,10 +634,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
* Only error is underflow, and this indicates * Only error is underflow, and this indicates
* a missing or null operand! * a missing or null operand!
*/ */
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_EXCEPTION((AE_INFO, status,
"Missing or null operand, %s\n", "Missing or null operand"));
acpi_format_exception
(status)));
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
} else { } else {
...@@ -730,7 +727,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state, ...@@ -730,7 +727,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
*/ */
(void)acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state); (void)acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "While creating Arg %d - %s\n", ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d",
(arg_count + 1), acpi_format_exception(status))); (arg_count + 1)));
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -100,9 +100,8 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, ...@@ -100,9 +100,8 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (result_obj) { if (result_obj) {
status = acpi_ds_result_pop(&obj_desc, walk_state); status = acpi_ds_result_pop(&obj_desc, walk_state);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_EXCEPTION((AE_INFO, status,
"Could not get result from predicate evaluation, %s\n", "Could not get result from predicate evaluation"));
acpi_format_exception(status)));
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -123,8 +122,8 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, ...@@ -123,8 +122,8 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
} }
if (!obj_desc) { if (!obj_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"No predicate obj_desc=%p State=%p\n", "No predicate obj_desc=%p State=%p",
obj_desc, walk_state)); obj_desc, walk_state));
return_ACPI_STATUS(AE_AML_NO_OPERAND); return_ACPI_STATUS(AE_AML_NO_OPERAND);
...@@ -140,8 +139,8 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, ...@@ -140,8 +139,8 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
} }
if (ACPI_GET_OBJECT_TYPE(local_obj_desc) != ACPI_TYPE_INTEGER) { if (ACPI_GET_OBJECT_TYPE(local_obj_desc) != ACPI_TYPE_INTEGER) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Bad predicate (not an integer) obj_desc=%p State=%p Type=%X\n", "Bad predicate (not an integer) obj_desc=%p State=%p Type=%X",
obj_desc, walk_state, obj_desc, walk_state,
ACPI_GET_OBJECT_TYPE(obj_desc))); ACPI_GET_OBJECT_TYPE(obj_desc)));
...@@ -314,12 +313,13 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state, ...@@ -314,12 +313,13 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
case AML_CLASS_EXECUTE: case AML_CLASS_EXECUTE:
case AML_CLASS_CREATE: case AML_CLASS_CREATE:
/* /*
* Most operators with arguments. * Most operators with arguments.
* Start a new result/operand state * Start a new result/operand state
*/ */
if (walk_state->opcode != AML_CREATE_FIELD_OP) {
status = acpi_ds_result_stack_push(walk_state); status = acpi_ds_result_stack_push(walk_state);
}
break; break;
default: default:
...@@ -361,7 +361,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) ...@@ -361,7 +361,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
op_class = walk_state->op_info->class; op_class = walk_state->op_info->class;
if (op_class == AML_CLASS_UNKNOWN) { if (op_class == AML_CLASS_UNKNOWN) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown opcode %X\n", ACPI_ERROR((AE_INFO, "Unknown opcode %X",
op->common.aml_opcode)); op->common.aml_opcode));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED); return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
} }
...@@ -452,12 +452,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) ...@@ -452,12 +452,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
walk_state->operands[1]->reference.offset)) { walk_state->operands[1]->reference.offset)) {
status = AE_OK; status = AE_OK;
} else { } else {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_EXCEPTION((AE_INFO, status,
"[%s]: Could not resolve operands, %s\n", "While resolving operands for [%s]",
acpi_ps_get_opcode_name acpi_ps_get_opcode_name
(walk_state->opcode), (walk_state->opcode)));
acpi_format_exception
(status)));
} }
} }
...@@ -676,8 +674,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) ...@@ -676,8 +674,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_UNDEFINED: case AML_TYPE_UNDEFINED:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Undefined opcode type Op=%p\n", op)); "Undefined opcode type Op=%p", op));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED); return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
case AML_TYPE_BOGUS: case AML_TYPE_BOGUS:
...@@ -689,10 +687,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) ...@@ -689,10 +687,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
default: default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p\n", "Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p",
op_class, op_type, op_class, op_type, op->common.aml_opcode,
op->common.aml_opcode, op)); op));
status = AE_NOT_IMPLEMENTED; status = AE_NOT_IMPLEMENTED;
break; break;
...@@ -723,20 +721,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) ...@@ -723,20 +721,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
cleanup: cleanup:
/* Invoke exception handler on error */
if (ACPI_FAILURE(status) &&
acpi_gbl_exception_handler && !(status & AE_CODE_CONTROL)) {
acpi_ex_exit_interpreter();
status = acpi_gbl_exception_handler(status,
walk_state->method_node->
name.integer,
walk_state->opcode,
walk_state->aml_offset,
NULL);
(void)acpi_ex_enter_interpreter();
}
if (walk_state->result_obj) { if (walk_state->result_obj) {
/* Break to debugger to display result */ /* Break to debugger to display result */
...@@ -758,18 +742,14 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) ...@@ -758,18 +742,14 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
} }
#endif #endif
/* Always clear the object stack */ /* Invoke exception handler on error */
walk_state->num_operands = 0;
#ifdef ACPI_DISASSEMBLER
/* On error, display method locals/args */
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
acpi_dm_dump_method_info(status, walk_state, op); status = acpi_ds_method_error(status, walk_state);
} }
#endif
/* Always clear the object stack */
walk_state->num_operands = 0;
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
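The hunk above removes the inline invocation of acpi_gbl_exception_handler from acpi_ds_exec_end_op and routes failures through acpi_ds_method_error instead. The helper's body is not shown in this diff; the sketch below only models what a centralized error hook of that shape might do, with simplified types and a placeholder AE_CODE_CONTROL mask:

typedef int acpi_status;

#define AE_CODE_CONTROL 0x2000          /* placeholder mask, not the real value */

typedef acpi_status (*exception_handler_fn)(acpi_status status,
                                            unsigned int opcode,
                                            unsigned int aml_offset);

static exception_handler_fn installed_handler;  /* registered by the host OS, may be NULL */

static acpi_status method_error(acpi_status status, unsigned int opcode,
                                unsigned int aml_offset)
{
    /*
     * Mirror the removed inline code: give a registered handler one chance
     * to inspect or override real errors, but never control-flow statuses.
     */
    if (status && installed_handler && !(status & AE_CODE_CONTROL))
        status = installed_handler(status, opcode, aml_offset);

    return status;
}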
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -127,7 +127,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, ...@@ -127,7 +127,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
char *path; char *path;
u32 flags; u32 flags;
ACPI_FUNCTION_NAME("ds_load1_begin_op"); ACPI_FUNCTION_TRACE("ds_load1_begin_op");
op = walk_state->op; op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
...@@ -138,14 +138,14 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, ...@@ -138,14 +138,14 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
if (op) { if (op) {
if (!(walk_state->op_info->flags & AML_NAMED)) { if (!(walk_state->op_info->flags & AML_NAMED)) {
*out_op = op; *out_op = op;
return (AE_OK); return_ACPI_STATUS(AE_OK);
} }
/* Check if this object has already been installed in the namespace */ /* Check if this object has already been installed in the namespace */
if (op->common.node) { if (op->common.node) {
*out_op = op; *out_op = op;
return (AE_OK); return_ACPI_STATUS(AE_OK);
} }
} }
...@@ -187,8 +187,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, ...@@ -187,8 +187,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
} }
#endif #endif
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(path, status); ACPI_ERROR_NAMESPACE(path, status);
return (status); return_ACPI_STATUS(status);
} }
/* /*
...@@ -233,9 +233,11 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, ...@@ -233,9 +233,11 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
/* All other types are an error */ /* All other types are an error */
ACPI_REPORT_ERROR(("Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)\n", acpi_ut_get_type_name(node->type), path)); ACPI_ERROR((AE_INFO,
"Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)",
acpi_ut_get_type_name(node->type), path));
return (AE_AML_OPERAND_TYPE); return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
} }
break; break;
...@@ -257,6 +259,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, ...@@ -257,6 +259,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* buffer_field, or Package), the name of the object is already * buffer_field, or Package), the name of the object is already
* in the namespace. * in the namespace.
*/ */
if (walk_state->deferred_node) { if (walk_state->deferred_node) {
/* This name is already in the namespace, get the node */ /* This name is already in the namespace, get the node */
...@@ -265,6 +268,16 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, ...@@ -265,6 +268,16 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
break; break;
} }
/*
* If we are executing a method, do not create any namespace objects
* during the load phase, only during execution.
*/
if (walk_state->method_node) {
node = NULL;
status = AE_OK;
break;
}
flags = ACPI_NS_NO_UPSEARCH; flags = ACPI_NS_NO_UPSEARCH;
if ((walk_state->opcode != AML_SCOPE_OP) && if ((walk_state->opcode != AML_SCOPE_OP) &&
(!(walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP))) { (!(walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP))) {
...@@ -289,8 +302,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, ...@@ -289,8 +302,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
ACPI_IMODE_LOAD_PASS1, flags, walk_state, ACPI_IMODE_LOAD_PASS1, flags, walk_state,
&(node)); &(node));
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(path, status); ACPI_ERROR_NAMESPACE(path, status);
return (status); return_ACPI_STATUS(status);
} }
break; break;
} }
...@@ -302,28 +315,29 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, ...@@ -302,28 +315,29 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
op = acpi_ps_alloc_op(walk_state->opcode); op = acpi_ps_alloc_op(walk_state->opcode);
if (!op) { if (!op) {
return (AE_NO_MEMORY); return_ACPI_STATUS(AE_NO_MEMORY);
} }
} }
/* Initialize */ /* Initialize the op */
op->named.name = node->name.integer;
#if (defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)) #if (defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY))
op->named.path = (u8 *) path; op->named.path = ACPI_CAST_PTR(u8, path);
#endif #endif
if (node) {
/* /*
* Put the Node in the "op" object that the parser uses, so we * Put the Node in the "op" object that the parser uses, so we
* can get it again quickly when this scope is closed * can get it again quickly when this scope is closed
*/ */
op->common.node = node; op->common.node = node;
op->named.name = node->name.integer;
}
acpi_ps_append_arg(acpi_ps_get_parent_scope(&walk_state->parser_state), acpi_ps_append_arg(acpi_ps_get_parent_scope(&walk_state->parser_state),
op); op);
*out_op = op; *out_op = op;
return (status); return_ACPI_STATUS(status);
} }
/******************************************************************************* /*******************************************************************************
...@@ -339,13 +353,13 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, ...@@ -339,13 +353,13 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* *
******************************************************************************/ ******************************************************************************/
acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
{ {
union acpi_parse_object *op; union acpi_parse_object *op;
acpi_object_type object_type; acpi_object_type object_type;
acpi_status status = AE_OK; acpi_status status = AE_OK;
ACPI_FUNCTION_NAME("ds_load1_end_op"); ACPI_FUNCTION_TRACE("ds_load1_end_op");
op = walk_state->op; op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
...@@ -354,7 +368,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) ...@@ -354,7 +368,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
/* We are only interested in opcodes that have an associated name */ /* We are only interested in opcodes that have an associated name */
if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) { if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
return (AE_OK); return_ACPI_STATUS(AE_OK);
} }
/* Get the object type to determine if we should pop the scope */ /* Get the object type to determine if we should pop the scope */
...@@ -363,21 +377,37 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) ...@@ -363,21 +377,37 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
#ifndef ACPI_NO_METHOD_EXECUTION #ifndef ACPI_NO_METHOD_EXECUTION
if (walk_state->op_info->flags & AML_FIELD) { if (walk_state->op_info->flags & AML_FIELD) {
/*
* If we are executing a method, do not create any namespace objects
* during the load phase, only during execution.
*/
if (!walk_state->method_node) {
if (walk_state->opcode == AML_FIELD_OP || if (walk_state->opcode == AML_FIELD_OP ||
walk_state->opcode == AML_BANK_FIELD_OP || walk_state->opcode == AML_BANK_FIELD_OP ||
walk_state->opcode == AML_INDEX_FIELD_OP) { walk_state->opcode == AML_INDEX_FIELD_OP) {
status = acpi_ds_init_field_objects(op, walk_state); status =
acpi_ds_init_field_objects(op, walk_state);
} }
return (status); }
return_ACPI_STATUS(status);
} }
/*
* If we are executing a method, do not create any namespace objects
* during the load phase, only during execution.
*/
if (!walk_state->method_node) {
if (op->common.aml_opcode == AML_REGION_OP) { if (op->common.aml_opcode == AML_REGION_OP) {
status = acpi_ex_create_region(op->named.data, op->named.length, status =
acpi_ex_create_region(op->named.data,
op->named.length,
(acpi_adr_space_type) (acpi_adr_space_type)
((op->common.value.arg)->common. ((op->common.value.arg)->
value.integer), walk_state); common.value.integer),
walk_state);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
return (status); return_ACPI_STATUS(status);
}
} }
} }
#endif #endif
...@@ -391,10 +421,20 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) ...@@ -391,10 +421,20 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
common. common.
aml_opcode))-> aml_opcode))->
object_type; object_type;
/* Set node type if we have a namespace node */
if (op->common.node) {
op->common.node->type = (u8) object_type; op->common.node->type = (u8) object_type;
} }
} }
}
/*
* If we are executing a method, do not create any namespace objects
* during the load phase, only during execution.
*/
if (!walk_state->method_node) {
if (op->common.aml_opcode == AML_METHOD_OP) { if (op->common.aml_opcode == AML_METHOD_OP) {
/* /*
* method_op pkg_length name_string method_flags term_list * method_op pkg_length name_string method_flags term_list
...@@ -409,29 +449,35 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) ...@@ -409,29 +449,35 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
walk_state, op, op->named.node)); walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) { if (!acpi_ns_get_attached_object(op->named.node)) {
walk_state->operands[0] = (void *)op->named.node; walk_state->operands[0] =
ACPI_CAST_PTR(void, op->named.node);
walk_state->num_operands = 1; walk_state->num_operands = 1;
status = status =
acpi_ds_create_operands(walk_state, acpi_ds_create_operands(walk_state,
op->common.value.arg); op->common.value.
arg);
if (ACPI_SUCCESS(status)) { if (ACPI_SUCCESS(status)) {
status = acpi_ex_create_method(op->named.data, status =
op->named.length, acpi_ex_create_method(op->named.
data,
op->named.
length,
walk_state); walk_state);
} }
walk_state->operands[0] = NULL; walk_state->operands[0] = NULL;
walk_state->num_operands = 0; walk_state->num_operands = 0;
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
return (status); return_ACPI_STATUS(status);
}
} }
} }
} }
/* Pop the scope stack */ /* Pop the scope stack (only if loading a table) */
if (acpi_ns_opens_scope(object_type)) { if (!walk_state->method_node && acpi_ns_opens_scope(object_type)) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"(%s): Popping scope for Op %p\n", "(%s): Popping scope for Op %p\n",
acpi_ut_get_type_name(object_type), op)); acpi_ut_get_type_name(object_type), op));
...@@ -439,7 +485,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) ...@@ -439,7 +485,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
status = acpi_ds_scope_stack_pop(walk_state); status = acpi_ds_scope_stack_pop(walk_state);
} }
return (status); return_ACPI_STATUS(status);
} }
/******************************************************************************* /*******************************************************************************
...@@ -456,8 +502,8 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) ...@@ -456,8 +502,8 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
******************************************************************************/ ******************************************************************************/
acpi_status acpi_status
acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state, acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object ** out_op) union acpi_parse_object **out_op)
{ {
union acpi_parse_object *op; union acpi_parse_object *op;
struct acpi_namespace_node *node; struct acpi_namespace_node *node;
...@@ -574,10 +620,10 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state, ...@@ -574,10 +620,10 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state,
if (status == AE_NOT_FOUND) { if (status == AE_NOT_FOUND) {
status = AE_OK; status = AE_OK;
} else { } else {
ACPI_REPORT_NSERROR(buffer_ptr, status); ACPI_ERROR_NAMESPACE(buffer_ptr, status);
} }
#else #else
ACPI_REPORT_NSERROR(buffer_ptr, status); ACPI_ERROR_NAMESPACE(buffer_ptr, status);
#endif #endif
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -607,7 +653,10 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state, ...@@ -607,7 +653,10 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state,
* Scope (DEB) { ... } * Scope (DEB) { ... }
*/ */
ACPI_REPORT_WARNING(("Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n", buffer_ptr, acpi_ut_get_type_name(node->type))); ACPI_WARNING((AE_INFO,
"Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)",
buffer_ptr,
acpi_ut_get_type_name(node->type)));
node->type = ACPI_TYPE_ANY; node->type = ACPI_TYPE_ANY;
walk_state->scope_info->common.value = ACPI_TYPE_ANY; walk_state->scope_info->common.value = ACPI_TYPE_ANY;
...@@ -617,7 +666,10 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state, ...@@ -617,7 +666,10 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state,
/* All other types are an error */ /* All other types are an error */
ACPI_REPORT_ERROR(("Invalid type (%s) for target of Scope operator [%4.4s]\n", acpi_ut_get_type_name(node->type), buffer_ptr)); ACPI_ERROR((AE_INFO,
"Invalid type (%s) for target of Scope operator [%4.4s]",
acpi_ut_get_type_name(node->type),
buffer_ptr));
return (AE_AML_OPERAND_TYPE); return (AE_AML_OPERAND_TYPE);
} }
...@@ -670,7 +722,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state, ...@@ -670,7 +722,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state,
} }
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(buffer_ptr, status); ACPI_ERROR_NAMESPACE(buffer_ptr, status);
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -840,6 +892,13 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) ...@@ -840,6 +892,13 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_NAMED_FIELD: case AML_TYPE_NAMED_FIELD:
/*
* If we are executing a method, initialize the field
*/
if (walk_state->method_node) {
status = acpi_ds_init_field_objects(op, walk_state);
}
switch (op->common.aml_opcode) { switch (op->common.aml_opcode) {
case AML_INDEX_FIELD_OP: case AML_INDEX_FIELD_OP:
...@@ -929,6 +988,24 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) ...@@ -929,6 +988,24 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
switch (op->common.aml_opcode) { switch (op->common.aml_opcode) {
#ifndef ACPI_NO_METHOD_EXECUTION #ifndef ACPI_NO_METHOD_EXECUTION
case AML_REGION_OP: case AML_REGION_OP:
/*
* If we are executing a method, initialize the region
*/
if (walk_state->method_node) {
status =
acpi_ex_create_region(op->named.data,
op->named.length,
(acpi_adr_space_type)
((op->common.value.
arg)->common.value.
integer),
walk_state);
if (ACPI_FAILURE(status)) {
return (status);
}
}
/* /*
* The op_region is not fully parsed at this time. Only valid * The op_region is not fully parsed at this time. Only valid
* argument is the space_id. (We must save the address of the * argument is the space_id. (We must save the address of the
...@@ -957,11 +1034,50 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) ...@@ -957,11 +1034,50 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
status = acpi_ds_create_node(walk_state, node, op); status = acpi_ds_create_node(walk_state, node, op);
break; break;
case AML_METHOD_OP:
/*
* method_op pkg_length name_string method_flags term_list
*
* Note: We must create the method node/object pair as soon as we
* see the method declaration. This allows later pass1 parsing
* of invocations of the method (need to know the number of
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"LOADING-Method: State=%p Op=%p named_obj=%p\n",
walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) {
walk_state->operands[0] =
ACPI_CAST_PTR(void, op->named.node);
walk_state->num_operands = 1;
status =
acpi_ds_create_operands(walk_state,
op->common.value.
arg);
if (ACPI_SUCCESS(status)) {
status =
acpi_ex_create_method(op->named.
data,
op->named.
length,
walk_state);
}
walk_state->operands[0] = NULL;
walk_state->num_operands = 0;
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
break;
#endif /* ACPI_NO_METHOD_EXECUTION */ #endif /* ACPI_NO_METHOD_EXECUTION */
default: default:
/* All NAMED_COMPLEX opcodes must be handled above */ /* All NAMED_COMPLEX opcodes must be handled above */
/* Note: Method objects were already created in Pass 1 */
break; break;
} }
break; break;
...@@ -1004,7 +1120,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) ...@@ -1004,7 +1120,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
*/ */
op->common.node = new_node; op->common.node = new_node;
} else { } else {
ACPI_REPORT_NSERROR(arg->common.value.string, status); ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
} }
break; break;
......
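The dswload.c changes above repeatedly apply one rule: while a control method is being executed (walk_state->method_node is set), the load-phase callbacks must not create namespace objects; creation is deferred to the execution pass. A stripped-down sketch of that guard, with invented types standing in for the real walk state and namespace calls:

#include <stddef.h>

struct walk_state {
    void *method_node;                  /* non-NULL only while executing a method */
};

static void *create_node(void)          /* stub standing in for acpi_ns_lookup() */
{
    static int dummy;
    return &dummy;
}

static int load_begin_named_op(struct walk_state *ws, void **node_out)
{
    if (ws->method_node) {
        /* Executing a method: skip creation, the execution pass will do it. */
        *node_out = NULL;
        return 0;
    }

    /* Loading a table: create (or find) the named node immediately. */
    *node_out = create_node();
    return 0;
}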
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -107,14 +107,14 @@ acpi_ds_scope_stack_push(struct acpi_namespace_node *node, ...@@ -107,14 +107,14 @@ acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
if (!node) { if (!node) {
/* Invalid scope */ /* Invalid scope */
ACPI_REPORT_ERROR(("ds_scope_stack_push: null scope passed\n")); ACPI_ERROR((AE_INFO, "Null scope parameter"));
return_ACPI_STATUS(AE_BAD_PARAMETER); return_ACPI_STATUS(AE_BAD_PARAMETER);
} }
/* Make sure object type is valid */ /* Make sure object type is valid */
if (!acpi_ut_valid_object_type(type)) { if (!acpi_ut_valid_object_type(type)) {
ACPI_REPORT_WARNING(("ds_scope_stack_push: Invalid object type: 0x%X\n", type)); ACPI_WARNING((AE_INFO, "Invalid object type: 0x%X", type));
} }
/* Allocate a new scope object */ /* Allocate a new scope object */
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -92,26 +92,23 @@ acpi_ds_result_remove(union acpi_operand_object **object, ...@@ -92,26 +92,23 @@ acpi_ds_result_remove(union acpi_operand_object **object,
state = walk_state->results; state = walk_state->results;
if (!state) { if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "No result object pushed! State=%p",
"No result object pushed! State=%p\n",
walk_state)); walk_state));
return (AE_NOT_EXIST); return (AE_NOT_EXIST);
} }
if (index >= ACPI_OBJ_MAX_OPERAND) { if (index >= ACPI_OBJ_MAX_OPERAND) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Index out of range: %X State=%p Num=%X\n", "Index out of range: %X State=%p Num=%X",
index, walk_state, index, walk_state, state->results.num_results));
state->results.num_results));
} }
/* Check for a valid result object */ /* Check for a valid result object */
if (!state->results.obj_desc[index]) { if (!state->results.obj_desc[index]) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Null operand! State=%p #Ops=%X, Index=%X\n", "Null operand! State=%p #Ops=%X, Index=%X",
walk_state, state->results.num_results, walk_state, state->results.num_results, index));
index));
return (AE_AML_NO_RETURN_VALUE); return (AE_AML_NO_RETURN_VALUE);
} }
...@@ -163,8 +160,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object, ...@@ -163,8 +160,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
} }
if (!state->results.num_results) { if (!state->results.num_results) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p",
"Result stack is empty! State=%p\n",
walk_state)); walk_state));
return (AE_AML_NO_RETURN_VALUE); return (AE_AML_NO_RETURN_VALUE);
} }
...@@ -192,8 +188,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object, ...@@ -192,8 +188,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
} }
} }
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "No result objects! State=%p", walk_state));
"No result objects! State=%p\n", walk_state));
return (AE_AML_NO_RETURN_VALUE); return (AE_AML_NO_RETURN_VALUE);
} }
...@@ -222,15 +217,14 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object, ...@@ -222,15 +217,14 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
state = walk_state->results; state = walk_state->results;
if (!state) { if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Warning: No result object pushed! State=%p\n", "No result object pushed! State=%p", walk_state));
walk_state));
return (AE_NOT_EXIST); return (AE_NOT_EXIST);
} }
if (!state->results.num_results) { if (!state->results.num_results) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "No result objects! State=%p",
"No result objects! State=%p\n", walk_state)); walk_state));
return (AE_AML_NO_RETURN_VALUE); return (AE_AML_NO_RETURN_VALUE);
} }
...@@ -250,8 +244,8 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object, ...@@ -250,8 +244,8 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
/* Check for a valid result object */ /* Check for a valid result object */
if (!*object) { if (!*object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Null operand! State=%p #Ops=%X Index=%X\n", "Null operand! State=%p #Ops=%X Index=%X",
walk_state, state->results.num_results, walk_state, state->results.num_results,
(u32) index)); (u32) index));
return (AE_AML_NO_RETURN_VALUE); return (AE_AML_NO_RETURN_VALUE);
...@@ -288,23 +282,21 @@ acpi_ds_result_push(union acpi_operand_object * object, ...@@ -288,23 +282,21 @@ acpi_ds_result_push(union acpi_operand_object * object,
state = walk_state->results; state = walk_state->results;
if (!state) { if (!state) {
ACPI_REPORT_ERROR(("No result stack frame during push\n")); ACPI_ERROR((AE_INFO, "No result stack frame during push"));
return (AE_AML_INTERNAL); return (AE_AML_INTERNAL);
} }
if (state->results.num_results == ACPI_OBJ_NUM_OPERANDS) { if (state->results.num_results == ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Result stack overflow: Obj=%p State=%p Num=%X\n", "Result stack overflow: Obj=%p State=%p Num=%X",
object, walk_state, object, walk_state, state->results.num_results));
state->results.num_results));
return (AE_STACK_OVERFLOW); return (AE_STACK_OVERFLOW);
} }
if (!object) { if (!object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Null Object! Obj=%p State=%p Num=%X\n", "Null Object! Obj=%p State=%p Num=%X",
object, walk_state, object, walk_state, state->results.num_results));
state->results.num_results));
return (AE_BAD_PARAMETER); return (AE_BAD_PARAMETER);
} }
...@@ -413,10 +405,9 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state) ...@@ -413,10 +405,9 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
/* Check for stack overflow */ /* Check for stack overflow */
if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) { if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"overflow! Obj=%p State=%p #Ops=%X\n", "Object stack overflow! Obj=%p State=%p #Ops=%X",
object, walk_state, object, walk_state, walk_state->num_operands));
walk_state->num_operands));
return (AE_STACK_OVERFLOW); return (AE_STACK_OVERFLOW);
} }
...@@ -460,8 +451,8 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state) ...@@ -460,8 +451,8 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
/* Check for stack underflow */ /* Check for stack underflow */
if (walk_state->num_operands == 0) { if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Underflow! Count=%X State=%p #Ops=%X\n", "Object stack underflow! Count=%X State=%p #Ops=%X",
pop_count, walk_state, pop_count, walk_state,
walk_state->num_operands)); walk_state->num_operands));
return (AE_STACK_UNDERFLOW); return (AE_STACK_UNDERFLOW);
...@@ -506,8 +497,8 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count, ...@@ -506,8 +497,8 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
/* Check for stack underflow */ /* Check for stack underflow */
if (walk_state->num_operands == 0) { if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Underflow! Count=%X State=%p #Ops=%X\n", "Object stack underflow! Count=%X State=%p #Ops=%X",
pop_count, walk_state, pop_count, walk_state,
walk_state->num_operands)); walk_state->num_operands));
return (AE_STACK_UNDERFLOW); return (AE_STACK_UNDERFLOW);
...@@ -826,15 +817,13 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state) ...@@ -826,15 +817,13 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
} }
if (walk_state->data_type != ACPI_DESC_TYPE_WALK) { if (walk_state->data_type != ACPI_DESC_TYPE_WALK) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
"%p is not a valid walk state\n",
walk_state)); walk_state));
return; return;
} }
if (walk_state->parser_state.scope) { if (walk_state->parser_state.scope) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "%p walk still has a scope list",
"%p walk still has a scope list\n",
walk_state)); walk_state));
} }
...@@ -894,23 +883,22 @@ acpi_ds_result_insert(void *object, ...@@ -894,23 +883,22 @@ acpi_ds_result_insert(void *object,
state = walk_state->results; state = walk_state->results;
if (!state) { if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "No result object pushed! State=%p",
"No result object pushed! State=%p\n",
walk_state)); walk_state));
return (AE_NOT_EXIST); return (AE_NOT_EXIST);
} }
if (index >= ACPI_OBJ_NUM_OPERANDS) { if (index >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Index out of range: %X Obj=%p State=%p Num=%X\n", "Index out of range: %X Obj=%p State=%p Num=%X",
index, object, walk_state, index, object, walk_state,
state->results.num_results)); state->results.num_results));
return (AE_BAD_PARAMETER); return (AE_BAD_PARAMETER);
} }
if (!object) { if (!object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Null Object! Index=%X Obj=%p State=%p Num=%X\n", "Null Object! Index=%X Obj=%p State=%p Num=%X",
index, object, walk_state, index, object, walk_state,
state->results.num_results)); state->results.num_results));
return (AE_BAD_PARAMETER); return (AE_BAD_PARAMETER);
...@@ -986,8 +974,8 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object, ...@@ -986,8 +974,8 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
/* Check for stack underflow */ /* Check for stack underflow */
if (walk_state->num_operands == 0) { if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Missing operand/stack empty! State=%p #Ops=%X\n", "Missing operand/stack empty! State=%p #Ops=%X",
walk_state, walk_state->num_operands)); walk_state, walk_state->num_operands));
*object = NULL; *object = NULL;
return (AE_AML_NO_OPERAND); return (AE_AML_NO_OPERAND);
...@@ -1000,8 +988,8 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object, ...@@ -1000,8 +988,8 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
/* Check for a valid operand */ /* Check for a valid operand */
if (!walk_state->operands[walk_state->num_operands]) { if (!walk_state->operands[walk_state->num_operands]) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Null operand! State=%p #Ops=%X\n", "Null operand! State=%p #Ops=%X",
walk_state, walk_state->num_operands)); walk_state, walk_state->num_operands));
*object = NULL; *object = NULL;
return (AE_AML_NO_OPERAND); return (AE_AML_NO_OPERAND);
......
This diff has been collapsed.
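The hunks above replace the old debug-channel error reporting (ACPI_DEBUG_PRINT with ACPI_DB_ERROR, and ACPI_REPORT_ERROR) with the dedicated ACPI_ERROR macro, which takes a source-location argument (AE_INFO) and drops the trailing "\n" from the format string; later hunks apply the same conversion to warnings (ACPI_WARNING) and to status-carrying failures (ACPI_EXCEPTION). The sketch below restates the before/after calling convention in isolation; it is illustrative only and not part of the patch, with walk_state standing in for the arguments used in the hunks:

        /* Old style: routed through the debug-print layer, explicit newline */
        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                          "Result stack is empty! State=%p\n", walk_state));

        /* New style: dedicated error channel, module/line supplied via AE_INFO */
        ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p", walk_state));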
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -73,7 +73,7 @@ acpi_status acpi_ev_initialize_events(void) ...@@ -73,7 +73,7 @@ acpi_status acpi_ev_initialize_events(void)
/* Make sure we have ACPI tables */ /* Make sure we have ACPI tables */
if (!acpi_gbl_DSDT) { if (!acpi_gbl_DSDT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, "No ACPI tables present!\n")); ACPI_WARNING((AE_INFO, "No ACPI tables present!"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES); return_ACPI_STATUS(AE_NO_ACPI_TABLES);
} }
...@@ -84,20 +84,63 @@ acpi_status acpi_ev_initialize_events(void) ...@@ -84,20 +84,63 @@ acpi_status acpi_ev_initialize_events(void)
*/ */
status = acpi_ev_fixed_event_initialize(); status = acpi_ev_fixed_event_initialize();
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to initialize fixed events, %s\n", ACPI_EXCEPTION((AE_INFO, status,
acpi_format_exception(status))); "Unable to initialize fixed events"));
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
status = acpi_ev_gpe_initialize(); status = acpi_ev_gpe_initialize();
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to initialize general purpose events, %s\n", acpi_format_exception(status))); ACPI_EXCEPTION((AE_INFO, status,
"Unable to initialize general purpose events"));
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
/*******************************************************************************
*
* FUNCTION: acpi_ev_install_fadt_gpes
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
* (0 and 1). This causes the _PRW methods to be run, so the HW
* must be fully initialized at this point, including global lock
* support.
*
******************************************************************************/
acpi_status acpi_ev_install_fadt_gpes(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_install_fadt_gpes");
/* Namespace must be locked */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return (status);
}
/* FADT GPE Block 0 */
(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
acpi_gbl_gpe_fadt_blocks[0]);
/* FADT GPE Block 1 */
(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
acpi_gbl_gpe_fadt_blocks[1]);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
/******************************************************************************* /*******************************************************************************
* *
* FUNCTION: acpi_ev_install_xrupt_handlers * FUNCTION: acpi_ev_install_xrupt_handlers
...@@ -120,7 +163,8 @@ acpi_status acpi_ev_install_xrupt_handlers(void) ...@@ -120,7 +163,8 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
status = acpi_ev_install_sci_handler(); status = acpi_ev_install_sci_handler();
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to install System Control Interrupt Handler, %s\n", acpi_format_exception(status))); ACPI_EXCEPTION((AE_INFO, status,
"Unable to install System Control Interrupt handler"));
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -128,7 +172,8 @@ acpi_status acpi_ev_install_xrupt_handlers(void) ...@@ -128,7 +172,8 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
status = acpi_ev_init_global_lock_handler(); status = acpi_ev_init_global_lock_handler();
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to initialize Global Lock handler, %s\n", acpi_format_exception(status))); ACPI_EXCEPTION((AE_INFO, status,
"Unable to initialize Global Lock handler"));
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -262,7 +307,9 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event) ...@@ -262,7 +307,9 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
enable_register_id, 0, enable_register_id, 0,
ACPI_MTX_DO_NOT_LOCK); ACPI_MTX_DO_NOT_LOCK);
ACPI_REPORT_ERROR(("No installed handler for fixed event [%08X]\n", event)); ACPI_ERROR((AE_INFO,
"No installed handler for fixed event [%08X]",
event));
return (ACPI_INTERRUPT_NOT_HANDLED); return (ACPI_INTERRUPT_NOT_HANDLED);
} }
......
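For orientation, the new acpi_ev_install_fadt_gpes() added above is meant to run only after the hardware and global lock support are fully initialized, because completing the FADT GPE blocks causes the _PRW methods to be executed. A hedged sketch of a caller follows; the surrounding context is hypothetical and only acpi_ev_install_fadt_gpes(), ACPI_FAILURE() and ACPI_EXCEPTION() come from the sources shown in this patch:

        acpi_status status;

        /* Hardware and global lock support must already be up, since this
         * runs the _PRW methods for FADT GPE blocks 0 and 1 */
        status = acpi_ev_install_fadt_gpes();
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Could not initialize FADT GPE blocks"));
        }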
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -372,14 +372,14 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, ...@@ -372,14 +372,14 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{ {
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_register_info *gpe_register_info;
u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
u8 enabled_status_byte; u8 enabled_status_byte;
struct acpi_gpe_register_info *gpe_register_info;
u32 status_reg; u32 status_reg;
u32 enable_reg; u32 enable_reg;
u32 flags; acpi_cpu_flags flags;
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
acpi_native_uint i; acpi_native_uint i;
acpi_native_uint j; acpi_native_uint j;
...@@ -546,7 +546,11 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) ...@@ -546,7 +546,11 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
status = acpi_ns_evaluate_by_handle(&info); status = acpi_ns_evaluate_by_handle(&info);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("%s while evaluating method [%4.4s] for GPE[%2X]\n", acpi_format_exception(status), acpi_ut_get_node_name(local_gpe_event_info.dispatch.method_node), gpe_number)); ACPI_EXCEPTION((AE_INFO, status,
"While evaluating method [%4.4s] for GPE[%2X]",
acpi_ut_get_node_name
(local_gpe_event_info.dispatch.
method_node), gpe_number));
} }
} }
...@@ -599,8 +603,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) ...@@ -599,8 +603,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
ACPI_GPE_EDGE_TRIGGERED) { ACPI_GPE_EDGE_TRIGGERED) {
status = acpi_hw_clear_gpe(gpe_event_info); status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to clear GPE[%2X]\n", acpi_format_exception(status), gpe_number)); ACPI_EXCEPTION((AE_INFO, status,
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED); "Unable to clear GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
} }
} }
...@@ -637,8 +643,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) ...@@ -637,8 +643,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
ACPI_GPE_LEVEL_TRIGGERED) { ACPI_GPE_LEVEL_TRIGGERED) {
status = acpi_hw_clear_gpe(gpe_event_info); status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to clear GPE[%2X]\n", acpi_format_exception(status), gpe_number)); ACPI_EXCEPTION((AE_INFO, status,
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED); "Unable to clear GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
} }
} }
break; break;
...@@ -651,8 +659,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) ...@@ -651,8 +659,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
*/ */
status = acpi_ev_disable_gpe(gpe_event_info); status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to disable GPE[%2X]\n", acpi_format_exception(status), gpe_number)); ACPI_EXCEPTION((AE_INFO, status,
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED); "Unable to disable GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
} }
/* /*
...@@ -663,7 +673,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) ...@@ -663,7 +673,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
acpi_ev_asynch_execute_gpe_method, acpi_ev_asynch_execute_gpe_method,
gpe_event_info); gpe_event_info);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to queue handler for GPE[%2X] - event disabled\n", acpi_format_exception(status), gpe_number)); ACPI_EXCEPTION((AE_INFO, status,
"Unable to queue handler for GPE[%2X] - event disabled",
gpe_number));
} }
break; break;
...@@ -671,7 +683,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) ...@@ -671,7 +683,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
/* No handler or method to run! */ /* No handler or method to run! */
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: No handler or method for GPE[%2X], disabling event\n", gpe_number)); ACPI_ERROR((AE_INFO,
"No handler or method for GPE[%2X], disabling event",
gpe_number));
/* /*
* Disable the GPE. The GPE will remain disabled until the ACPI * Disable the GPE. The GPE will remain disabled until the ACPI
...@@ -679,13 +693,15 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) ...@@ -679,13 +693,15 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
*/ */
status = acpi_ev_disable_gpe(gpe_event_info); status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to disable GPE[%2X]\n", acpi_format_exception(status), gpe_number)); ACPI_EXCEPTION((AE_INFO, status,
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED); "Unable to disable GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
} }
break; break;
} }
return_VALUE(ACPI_INTERRUPT_HANDLED); return_UINT32(ACPI_INTERRUPT_HANDLED);
} }
#ifdef ACPI_GPE_NOTIFY_CHECK #ifdef ACPI_GPE_NOTIFY_CHECK
...@@ -722,7 +738,9 @@ acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info) ...@@ -722,7 +738,9 @@ acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE); acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
ACPI_REPORT_INFO(("GPE %p was updated from wake/run to wake-only\n", gpe_event_info)); ACPI_INFO((AE_INFO,
"GPE %p was updated from wake/run to wake-only",
gpe_event_info));
/* This was a wake-only GPE */ /* This was a wake-only GPE */
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -136,7 +136,7 @@ acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback) ...@@ -136,7 +136,7 @@ acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback)
struct acpi_gpe_block_info *gpe_block; struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_info; struct acpi_gpe_xrupt_info *gpe_xrupt_info;
acpi_status status = AE_OK; acpi_status status = AE_OK;
u32 flags; acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_walk_gpe_list"); ACPI_FUNCTION_TRACE("ev_walk_gpe_list");
...@@ -279,8 +279,8 @@ acpi_ev_save_method_info(acpi_handle obj_handle, ...@@ -279,8 +279,8 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
default: default:
/* Unknown method type, just ignore it! */ /* Unknown method type, just ignore it! */
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Unknown GPE method type: %s (name not of form _Lxx or _Exx)\n", "Unknown GPE method type: %s (name not of form _Lxx or _Exx)",
name)); name));
return_ACPI_STATUS(AE_OK); return_ACPI_STATUS(AE_OK);
} }
...@@ -291,8 +291,8 @@ acpi_ev_save_method_info(acpi_handle obj_handle, ...@@ -291,8 +291,8 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
if (gpe_number == ACPI_UINT32_MAX) { if (gpe_number == ACPI_UINT32_MAX) {
/* Conversion failed; invalid method, just ignore it */ /* Conversion failed; invalid method, just ignore it */
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)\n", "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
name)); name));
return_ACPI_STATUS(AE_OK); return_ACPI_STATUS(AE_OK);
} }
...@@ -319,8 +319,8 @@ acpi_ev_save_method_info(acpi_handle obj_handle, ...@@ -319,8 +319,8 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
gpe_event_info = gpe_event_info =
&gpe_block->event_info[gpe_number - gpe_block->block_base_number]; &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD | gpe_event_info->flags = (u8)
ACPI_GPE_TYPE_RUNTIME); (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);
gpe_event_info->dispatch.method_node = gpe_event_info->dispatch.method_node =
(struct acpi_namespace_node *)obj_handle; (struct acpi_namespace_node *)obj_handle;
...@@ -443,6 +443,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, ...@@ -443,6 +443,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
gpe_event_info->flags &= gpe_event_info->flags &=
~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED); ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);
status = status =
acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE); acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
...@@ -479,7 +480,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 ...@@ -479,7 +480,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
struct acpi_gpe_xrupt_info *next_gpe_xrupt; struct acpi_gpe_xrupt_info *next_gpe_xrupt;
struct acpi_gpe_xrupt_info *gpe_xrupt; struct acpi_gpe_xrupt_info *gpe_xrupt;
acpi_status status; acpi_status status;
u32 flags; acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_get_gpe_xrupt_block"); ACPI_FUNCTION_TRACE("ev_get_gpe_xrupt_block");
...@@ -526,8 +527,8 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 ...@@ -526,8 +527,8 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
acpi_ev_gpe_xrupt_handler, acpi_ev_gpe_xrupt_handler,
gpe_xrupt); gpe_xrupt);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Could not install GPE interrupt handler at level 0x%X\n", "Could not install GPE interrupt handler at level 0x%X",
interrupt_number)); interrupt_number));
return_PTR(NULL); return_PTR(NULL);
} }
...@@ -553,7 +554,7 @@ static acpi_status ...@@ -553,7 +554,7 @@ static acpi_status
acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt) acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{ {
acpi_status status; acpi_status status;
u32 flags; acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_delete_gpe_xrupt"); ACPI_FUNCTION_TRACE("ev_delete_gpe_xrupt");
...@@ -566,7 +567,8 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt) ...@@ -566,7 +567,8 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
/* Disable this interrupt */ /* Disable this interrupt */
status = acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number, status =
acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
acpi_ev_gpe_xrupt_handler); acpi_ev_gpe_xrupt_handler);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
...@@ -610,7 +612,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, ...@@ -610,7 +612,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
struct acpi_gpe_block_info *next_gpe_block; struct acpi_gpe_block_info *next_gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_block; struct acpi_gpe_xrupt_info *gpe_xrupt_block;
acpi_status status; acpi_status status;
u32 flags; acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_install_gpe_block"); ACPI_FUNCTION_TRACE("ev_install_gpe_block");
...@@ -663,7 +665,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, ...@@ -663,7 +665,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block) acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{ {
acpi_status status; acpi_status status;
u32 flags; acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("ev_install_gpe_block"); ACPI_FUNCTION_TRACE("ev_install_gpe_block");
...@@ -743,8 +745,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) ...@@ -743,8 +745,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
sizeof(struct sizeof(struct
acpi_gpe_register_info)); acpi_gpe_register_info));
if (!gpe_register_info) { if (!gpe_register_info) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Could not allocate the gpe_register_info table\n")); "Could not allocate the gpe_register_info table"));
return_ACPI_STATUS(AE_NO_MEMORY); return_ACPI_STATUS(AE_NO_MEMORY);
} }
...@@ -757,8 +759,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) ...@@ -757,8 +759,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
ACPI_GPE_REGISTER_WIDTH) * ACPI_GPE_REGISTER_WIDTH) *
sizeof(struct acpi_gpe_event_info)); sizeof(struct acpi_gpe_event_info));
if (!gpe_event_info) { if (!gpe_event_info) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Could not allocate the gpe_event_info table\n")); "Could not allocate the gpe_event_info table"));
status = AE_NO_MEMORY; status = AE_NO_MEMORY;
goto error_exit; goto error_exit;
} }
...@@ -771,7 +773,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) ...@@ -771,7 +773,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
/* /*
* Initialize the GPE Register and Event structures. A goal of these * Initialize the GPE Register and Event structures. A goal of these
* tables is to hide the fact that there are two separate GPE register sets * tables is to hide the fact that there are two separate GPE register sets
* in a given gpe hardware block, the status registers occupy the first half, * in a given GPE hardware block, the status registers occupy the first half,
* and the enable registers occupy the second half. * and the enable registers occupy the second half.
*/ */
this_register = gpe_register_info; this_register = gpe_register_info;
...@@ -812,11 +814,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) ...@@ -812,11 +814,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
this_event++; this_event++;
} }
/* /* Disable all GPEs within this register */
* Clear the status/enable registers. Note that status registers
* are cleared by writing a '1', while enable registers are cleared
* by writing a '0'.
*/
status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00, status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00,
&this_register-> &this_register->
enable_address); enable_address);
...@@ -824,6 +823,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) ...@@ -824,6 +823,8 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
goto error_exit; goto error_exit;
} }
/* Clear any pending GPE events within this register */
status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF, status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF,
&this_register-> &this_register->
status_address); status_address);
...@@ -860,7 +861,9 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) ...@@ -860,7 +861,9 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
* *
* RETURN: Status * RETURN: Status
* *
* DESCRIPTION: Create and Install a block of GPE registers * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
* the block are disabled at exit.
* Note: Assumes namespace is locked.
* *
******************************************************************************/ ******************************************************************************/
...@@ -872,14 +875,8 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, ...@@ -872,14 +875,8 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
u32 interrupt_number, u32 interrupt_number,
struct acpi_gpe_block_info **return_gpe_block) struct acpi_gpe_block_info **return_gpe_block)
{ {
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_event_info *gpe_event_info;
acpi_native_uint i;
acpi_native_uint j;
u32 wake_gpe_count;
u32 gpe_enabled_count;
acpi_status status; acpi_status status;
struct acpi_gpe_walk_info gpe_info; struct acpi_gpe_block_info *gpe_block;
ACPI_FUNCTION_TRACE("ev_create_gpe_block"); ACPI_FUNCTION_TRACE("ev_create_gpe_block");
...@@ -896,22 +893,24 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, ...@@ -896,22 +893,24 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
/* Initialize the new GPE block */ /* Initialize the new GPE block */
gpe_block->node = gpe_device;
gpe_block->register_count = register_count; gpe_block->register_count = register_count;
gpe_block->block_base_number = gpe_block_base_number; gpe_block->block_base_number = gpe_block_base_number;
gpe_block->node = gpe_device;
ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
sizeof(struct acpi_generic_address)); sizeof(struct acpi_generic_address));
/* Create the register_info and event_info sub-structures */ /*
* Create the register_info and event_info sub-structures
* Note: disables and clears all GPEs in the block
*/
status = acpi_ev_create_gpe_info_blocks(gpe_block); status = acpi_ev_create_gpe_info_blocks(gpe_block);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_MEM_FREE(gpe_block); ACPI_MEM_FREE(gpe_block);
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
/* Install the new block in the global list(s) */ /* Install the new block in the global lists */
status = acpi_ev_install_gpe_block(gpe_block, interrupt_number); status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
...@@ -926,16 +925,70 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, ...@@ -926,16 +925,70 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
acpi_ev_save_method_info, gpe_block, acpi_ev_save_method_info, gpe_block,
NULL); NULL);
/* Return the new block */
if (return_gpe_block) {
(*return_gpe_block) = gpe_block;
}
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
(u32) gpe_block->block_base_number,
(u32) (gpe_block->block_base_number +
((gpe_block->register_count *
ACPI_GPE_REGISTER_WIDTH) - 1)),
gpe_device->name.ascii, gpe_block->register_count,
interrupt_number));
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_initialize_gpe_block
*
* PARAMETERS: gpe_device - Handle to the parent GPE block
* gpe_block - Gpe Block info
*
* RETURN: Status
*
* DESCRIPTION: Initialize and enable a GPE block. First find and run any
* _PRW methods associated with the block, then enable the
* appropriate GPEs.
* Note: Assumes namespace is locked.
*
******************************************************************************/
acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_gpe_block_info *gpe_block)
{
acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_gpe_walk_info gpe_info;
u32 wake_gpe_count;
u32 gpe_enabled_count;
acpi_native_uint i;
acpi_native_uint j;
ACPI_FUNCTION_TRACE("ev_initialize_gpe_block");
/* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
if (!gpe_block) {
return_ACPI_STATUS(AE_OK);
}
/* /*
* Runtime option: Should Wake GPEs be enabled at runtime? The default * Runtime option: Should wake GPEs be enabled at runtime? The default
* is No, they should only be enabled just as the machine goes to sleep. * is no, they should only be enabled just as the machine goes to sleep.
*/ */
if (acpi_gbl_leave_wake_gpes_disabled) { if (acpi_gbl_leave_wake_gpes_disabled) {
/* /*
* Differentiate RUNTIME vs WAKE GPEs, via the _PRW control methods. * Differentiate runtime vs wake GPEs, via the _PRW control methods.
* (Each GPE that has one or more _PRWs that reference it is by * Each GPE that has one or more _PRWs that reference it is by
* definition a WAKE GPE and will not be enabled while the machine * definition a wake GPE and will not be enabled while the machine
* is running.) * is running.
*/ */
gpe_info.gpe_block = gpe_block; gpe_info.gpe_block = gpe_block;
gpe_info.gpe_device = gpe_device; gpe_info.gpe_device = gpe_device;
...@@ -948,9 +1001,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, ...@@ -948,9 +1001,12 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
} }
/* /*
* Enable all GPEs in this block that are 1) "runtime" or "run/wake" GPEs, * Enable all GPEs in this block that have these attributes:
* and 2) have a corresponding _Lxx or _Exx method. All other GPEs must * 1) are "runtime" or "run/wake" GPEs, and
* be enabled via the acpi_enable_gpe() external interface. * 2) have a corresponding _Lxx or _Exx method
*
* Any other GPEs within this block must be enabled via the acpi_enable_gpe()
* external interface.
*/ */
wake_gpe_count = 0; wake_gpe_count = 0;
gpe_enabled_count = 0; gpe_enabled_count = 0;
...@@ -976,32 +1032,19 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, ...@@ -976,32 +1032,19 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
} }
} }
/* Dump info about this GPE block */
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
(u32) gpe_block->block_base_number,
(u32) (gpe_block->block_base_number +
((gpe_block->register_count *
ACPI_GPE_REGISTER_WIDTH) - 1)),
gpe_device->name.ascii, gpe_block->register_count,
interrupt_number));
/* Enable all valid GPEs found above */
status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
ACPI_DEBUG_PRINT((ACPI_DB_INIT, ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"Found %u Wake, Enabled %u Runtime GPEs in this block\n", "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
wake_gpe_count, gpe_enabled_count)); wake_gpe_count, gpe_enabled_count));
/* Return the new block */ /* Enable all valid runtime GPEs found above */
if (return_gpe_block) { status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
(*return_gpe_block) = gpe_block; if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not enable GPEs in gpe_block %p",
gpe_block));
} }
return_ACPI_STATUS(AE_OK); return_ACPI_STATUS(status);
} }
/******************************************************************************* /*******************************************************************************
...@@ -1072,8 +1115,8 @@ acpi_status acpi_ev_gpe_initialize(void) ...@@ -1072,8 +1115,8 @@ acpi_status acpi_ev_gpe_initialize(void)
&acpi_gbl_gpe_fadt_blocks[0]); &acpi_gbl_gpe_fadt_blocks[0]);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not create GPE Block 0, %s\n", ACPI_EXCEPTION((AE_INFO, status,
acpi_format_exception(status))); "Could not create GPE Block 0"));
} }
} }
...@@ -1086,7 +1129,12 @@ acpi_status acpi_ev_gpe_initialize(void) ...@@ -1086,7 +1129,12 @@ acpi_status acpi_ev_gpe_initialize(void)
if ((register_count0) && if ((register_count0) &&
(gpe_number_max >= acpi_gbl_FADT->gpe1_base)) { (gpe_number_max >= acpi_gbl_FADT->gpe1_base)) {
ACPI_REPORT_ERROR(("GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1\n", gpe_number_max, acpi_gbl_FADT->gpe1_base, acpi_gbl_FADT->gpe1_base + ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1))); ACPI_ERROR((AE_INFO,
"GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
gpe_number_max, acpi_gbl_FADT->gpe1_base,
acpi_gbl_FADT->gpe1_base +
((register_count1 *
ACPI_GPE_REGISTER_WIDTH) - 1)));
/* Ignore GPE1 block by setting the register count to zero */ /* Ignore GPE1 block by setting the register count to zero */
...@@ -1104,7 +1152,8 @@ acpi_status acpi_ev_gpe_initialize(void) ...@@ -1104,7 +1152,8 @@ acpi_status acpi_ev_gpe_initialize(void)
[1]); [1]);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not create GPE Block 1, %s\n", acpi_format_exception(status))); ACPI_EXCEPTION((AE_INFO, status,
"Could not create GPE Block 1"));
} }
/* /*
...@@ -1130,7 +1179,9 @@ acpi_status acpi_ev_gpe_initialize(void) ...@@ -1130,7 +1179,9 @@ acpi_status acpi_ev_gpe_initialize(void)
/* Check for Max GPE number out-of-range */ /* Check for Max GPE number out-of-range */
if (gpe_number_max > ACPI_GPE_MAX) { if (gpe_number_max > ACPI_GPE_MAX) {
ACPI_REPORT_ERROR(("Maximum GPE number from FADT is too large: 0x%X\n", gpe_number_max)); ACPI_ERROR((AE_INFO,
"Maximum GPE number from FADT is too large: 0x%X",
gpe_number_max));
status = AE_BAD_VALUE; status = AE_BAD_VALUE;
goto cleanup; goto cleanup;
} }
......
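The evgpeblk.c changes above split GPE block setup into two phases: acpi_ev_create_gpe_block() now only creates, installs, and disables the block, while the new acpi_ev_initialize_gpe_block() runs the _PRW methods and enables the block's runtime GPEs; the evxfevnt.c hunk further below wires acpi_install_gpe_block() up to this same sequence. A compressed sketch of the two-phase flow, with error handling trimmed; the variable names are placeholders taken from the function bodies in the hunks, not patch code:

        struct acpi_gpe_block_info *gpe_block;
        acpi_status status;

        /* Phase 1: create and install the block; all of its GPEs stay disabled.
         * Assumes the namespace is locked, per the updated function header. */
        status = acpi_ev_create_gpe_block(gpe_device, gpe_block_address,
                                          register_count, gpe_block_base_number,
                                          interrupt_number, &gpe_block);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /* Phase 2: run the _PRW methods and enable the runtime GPEs */
        status = acpi_ev_initialize_gpe_block(gpe_device, gpe_block);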
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -303,7 +303,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context) ...@@ -303,7 +303,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context)
acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore,
acpi_gbl_global_lock_thread_count); acpi_gbl_global_lock_thread_count);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not signal Global Lock semaphore\n")); ACPI_ERROR((AE_INFO,
"Could not signal Global Lock semaphore"));
} }
} }
} }
...@@ -344,7 +345,8 @@ static u32 acpi_ev_global_lock_handler(void *context) ...@@ -344,7 +345,8 @@ static u32 acpi_ev_global_lock_handler(void *context)
acpi_ev_global_lock_thread, acpi_ev_global_lock_thread,
context); context);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not queue Global Lock thread, %s\n", acpi_format_exception(status))); ACPI_EXCEPTION((AE_INFO, status,
"Could not queue Global Lock thread"));
return (ACPI_INTERRUPT_NOT_HANDLED); return (ACPI_INTERRUPT_NOT_HANDLED);
} }
...@@ -384,7 +386,8 @@ acpi_status acpi_ev_init_global_lock_handler(void) ...@@ -384,7 +386,8 @@ acpi_status acpi_ev_init_global_lock_handler(void)
* with an error. * with an error.
*/ */
if (status == AE_NO_HARDWARE_RESPONSE) { if (status == AE_NO_HARDWARE_RESPONSE) {
ACPI_REPORT_ERROR(("No response from Global Lock hardware, disabling lock\n")); ACPI_ERROR((AE_INFO,
"No response from Global Lock hardware, disabling lock"));
acpi_gbl_global_lock_present = FALSE; acpi_gbl_global_lock_present = FALSE;
status = AE_OK; status = AE_OK;
...@@ -480,7 +483,8 @@ acpi_status acpi_ev_release_global_lock(void) ...@@ -480,7 +483,8 @@ acpi_status acpi_ev_release_global_lock(void)
ACPI_FUNCTION_TRACE("ev_release_global_lock"); ACPI_FUNCTION_TRACE("ev_release_global_lock");
if (!acpi_gbl_global_lock_thread_count) { if (!acpi_gbl_global_lock_thread_count) {
ACPI_REPORT_WARNING(("Cannot release HW Global Lock, it has not been acquired\n")); ACPI_WARNING((AE_INFO,
"Cannot release HW Global Lock, it has not been acquired"));
return_ACPI_STATUS(AE_NOT_ACQUIRED); return_ACPI_STATUS(AE_NOT_ACQUIRED);
} }
...@@ -542,8 +546,8 @@ void acpi_ev_terminate(void) ...@@ -542,8 +546,8 @@ void acpi_ev_terminate(void)
for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
status = acpi_disable_event((u32) i, 0); status = acpi_disable_event((u32) i, 0);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Could not disable fixed event %d\n", "Could not disable fixed event %d",
(u32) i)); (u32) i));
} }
} }
...@@ -556,8 +560,7 @@ void acpi_ev_terminate(void) ...@@ -556,8 +560,7 @@ void acpi_ev_terminate(void)
status = acpi_ev_remove_sci_handler(); status = acpi_ev_remove_sci_handler();
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
"Could not remove SCI handler\n"));
} }
} }
...@@ -570,8 +573,7 @@ void acpi_ev_terminate(void) ...@@ -570,8 +573,7 @@ void acpi_ev_terminate(void)
if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) { if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
status = acpi_disable(); status = acpi_disable();
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, ACPI_WARNING((AE_INFO, "acpi_disable failed"));
"acpi_disable failed\n"));
} }
} }
return_VOID; return_VOID;
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -295,10 +295,10 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, ...@@ -295,10 +295,10 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
handler_desc = region_obj->region.handler; handler_desc = region_obj->region.handler;
if (!handler_desc) { if (!handler_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"No handler for Region [%4.4s] (%p) [%s]\n", "No handler for Region [%4.4s] (%p) [%s]",
acpi_ut_get_node_name(region_obj->region. acpi_ut_get_node_name(region_obj->region.node),
node), region_obj, region_obj,
acpi_ut_get_region_name(region_obj->region. acpi_ut_get_region_name(region_obj->region.
space_id))); space_id)));
...@@ -317,11 +317,10 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, ...@@ -317,11 +317,10 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
if (!region_setup) { if (!region_setup) {
/* No initialization routine, exit with error */ /* No initialization routine, exit with error */
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"No init routine for region(%p) [%s]\n", "No init routine for region(%p) [%s]",
region_obj, region_obj,
acpi_ut_get_region_name(region_obj-> acpi_ut_get_region_name(region_obj->region.
region.
space_id))); space_id)));
return_ACPI_STATUS(AE_NOT_EXIST); return_ACPI_STATUS(AE_NOT_EXIST);
} }
...@@ -347,9 +346,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, ...@@ -347,9 +346,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
/* Check for failure of the Region Setup */ /* Check for failure of the Region Setup */
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_EXCEPTION((AE_INFO, status,
"Region Init: %s [%s]\n", "During region initialization: [%s]",
acpi_format_exception(status),
acpi_ut_get_region_name(region_obj-> acpi_ut_get_region_name(region_obj->
region. region.
space_id))); space_id)));
...@@ -406,10 +404,9 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, ...@@ -406,10 +404,9 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
region_obj2->extra.region_context); region_obj2->extra.region_context);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Handler for [%s] returned %s\n", ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
acpi_ut_get_region_name(region_obj->region. acpi_ut_get_region_name(region_obj->region.
space_id), space_id)));
acpi_format_exception(status)));
} }
if (! if (!
...@@ -501,12 +498,10 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj, ...@@ -501,12 +498,10 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
status = acpi_ev_execute_reg_method(region_obj, 0); status = acpi_ev_execute_reg_method(region_obj, 0);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_EXCEPTION((AE_INFO, status,
"%s from region _REG, [%s]\n", "from region _REG, [%s]",
acpi_format_exception(status),
acpi_ut_get_region_name acpi_ut_get_region_name
(region_obj->region. (region_obj->region.space_id)));
space_id)));
} }
if (acpi_ns_is_locked) { if (acpi_ns_is_locked) {
...@@ -528,12 +523,10 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj, ...@@ -528,12 +523,10 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
/* Init routine may fail, Just ignore errors */ /* Init routine may fail, Just ignore errors */
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_EXCEPTION((AE_INFO, status,
"%s from region init, [%s]\n", "from region init, [%s]",
acpi_format_exception(status),
acpi_ut_get_region_name acpi_ut_get_region_name
(region_obj->region. (region_obj->region.space_id)));
space_id)));
} }
region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE); region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -233,7 +233,11 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, ...@@ -233,7 +233,11 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*/ */
status = AE_OK; status = AE_OK;
} else { } else {
ACPI_REPORT_ERROR(("Could not install pci_config handler for Root Bridge %4.4s, %s\n", acpi_ut_get_node_name(pci_root_node), acpi_format_exception(status))); ACPI_EXCEPTION((AE_INFO,
status,
"Could not install pci_config handler for Root Bridge %4.4s",
acpi_ut_get_node_name
(pci_root_node)));
} }
} }
break; break;
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -88,7 +88,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context) ...@@ -88,7 +88,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
*/ */
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
return_VALUE(interrupt_handled); return_UINT32(interrupt_handled);
} }
/******************************************************************************* /*******************************************************************************
...@@ -121,7 +121,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context) ...@@ -121,7 +121,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
*/ */
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list); interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
return_VALUE(interrupt_handled); return_UINT32(interrupt_handled);
} }
/****************************************************************************** /******************************************************************************
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -143,8 +143,8 @@ acpi_install_fixed_event_handler(u32 event, ...@@ -143,8 +143,8 @@ acpi_install_fixed_event_handler(u32 event,
if (ACPI_SUCCESS(status)) if (ACPI_SUCCESS(status))
status = acpi_enable_event(event, 0); status = acpi_enable_event(event, 0);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
"Could not enable fixed event.\n")); event));
/* Remove the handler */ /* Remove the handler */
...@@ -204,10 +204,11 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler) ...@@ -204,10 +204,11 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
acpi_gbl_fixed_event_handlers[event].context = NULL; acpi_gbl_fixed_event_handlers[event].context = NULL;
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, ACPI_WARNING((AE_INFO,
"Could not write to fixed event enable register.\n")); "Could not write to fixed event enable register %X",
event));
} else { } else {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X.\n", ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
event)); event));
} }
...@@ -434,7 +435,7 @@ acpi_remove_notify_handler(acpi_handle device, ...@@ -434,7 +435,7 @@ acpi_remove_notify_handler(acpi_handle device,
if (device == ACPI_ROOT_OBJECT) { if (device == ACPI_ROOT_OBJECT) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Removing notify handler for ROOT object.\n")); "Removing notify handler for namespace root object\n"));
if (((handler_type & ACPI_SYSTEM_NOTIFY) && if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
!acpi_gbl_system_notify.handler) || !acpi_gbl_system_notify.handler) ||
...@@ -562,7 +563,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device, ...@@ -562,7 +563,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
struct acpi_gpe_event_info *gpe_event_info; struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler; struct acpi_handler_info *handler;
acpi_status status; acpi_status status;
u32 flags; acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("acpi_install_gpe_handler"); ACPI_FUNCTION_TRACE("acpi_install_gpe_handler");
...@@ -653,7 +654,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, ...@@ -653,7 +654,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
struct acpi_gpe_event_info *gpe_event_info; struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler; struct acpi_handler_info *handler;
acpi_status status; acpi_status status;
u32 flags; acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("acpi_remove_gpe_handler"); ACPI_FUNCTION_TRACE("acpi_remove_gpe_handler");
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -70,8 +70,7 @@ acpi_status acpi_enable(void) ...@@ -70,8 +70,7 @@ acpi_status acpi_enable(void)
/* Make sure we have the FADT */ /* Make sure we have the FADT */
if (!acpi_gbl_FADT) { if (!acpi_gbl_FADT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, ACPI_WARNING((AE_INFO, "No FADT information present!"));
"No FADT information present!\n"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES); return_ACPI_STATUS(AE_NO_ACPI_TABLES);
} }
...@@ -83,7 +82,8 @@ acpi_status acpi_enable(void) ...@@ -83,7 +82,8 @@ acpi_status acpi_enable(void)
status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not transition to ACPI mode.\n")); ACPI_ERROR((AE_INFO,
"Could not transition to ACPI mode"));
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -113,8 +113,7 @@ acpi_status acpi_disable(void) ...@@ -113,8 +113,7 @@ acpi_status acpi_disable(void)
ACPI_FUNCTION_TRACE("acpi_disable"); ACPI_FUNCTION_TRACE("acpi_disable");
if (!acpi_gbl_FADT) { if (!acpi_gbl_FADT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, ACPI_WARNING((AE_INFO, "No FADT information present!"));
"No FADT information present!\n"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES); return_ACPI_STATUS(AE_NO_ACPI_TABLES);
} }
...@@ -127,7 +126,7 @@ acpi_status acpi_disable(void) ...@@ -127,7 +126,7 @@ acpi_status acpi_disable(void)
status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY); status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Could not exit ACPI mode to legacy mode")); "Could not exit ACPI mode to legacy mode"));
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -185,8 +184,8 @@ acpi_status acpi_enable_event(u32 event, u32 flags) ...@@ -185,8 +184,8 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
} }
if (value != 1) { if (value != 1) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Could not enable %s event\n", "Could not enable %s event",
acpi_ut_get_event_name(event))); acpi_ut_get_event_name(event)));
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
} }
...@@ -384,8 +383,8 @@ acpi_status acpi_disable_event(u32 event, u32 flags) ...@@ -384,8 +383,8 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
} }
if (value != 0) { if (value != 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Could not disable %s events\n", "Could not disable %s events",
acpi_ut_get_event_name(event))); acpi_ut_get_event_name(event)));
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
} }
...@@ -626,6 +625,13 @@ acpi_install_gpe_block(acpi_handle gpe_device, ...@@ -626,6 +625,13 @@ acpi_install_gpe_block(acpi_handle gpe_device,
goto unlock_and_exit; goto unlock_and_exit;
} }
/* Run the _PRW methods and enable the GPEs */
status = acpi_ev_initialize_gpe_block(node, gpe_block);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
/* Get the device_object attached to the node */ /* Get the device_object attached to the node */
obj_desc = acpi_ns_get_attached_object(node); obj_desc = acpi_ns_get_attached_object(node);
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -413,8 +413,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, ...@@ -413,8 +413,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
(!ACPI_STRNCMP(table_ptr->signature, (!ACPI_STRNCMP(table_ptr->signature,
acpi_gbl_table_data[ACPI_TABLE_SSDT].signature, acpi_gbl_table_data[ACPI_TABLE_SSDT].signature,
acpi_gbl_table_data[ACPI_TABLE_SSDT].sig_length))) { acpi_gbl_table_data[ACPI_TABLE_SSDT].sig_length))) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Table has invalid signature [%4.4s], must be SSDT or PSDT\n", "Table has invalid signature [%4.4s], must be SSDT or PSDT",
table_ptr->signature)); table_ptr->signature));
status = AE_BAD_SIGNATURE; status = AE_BAD_SIGNATURE;
goto cleanup; goto cleanup;
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -504,18 +504,12 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, ...@@ -504,18 +504,12 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
} }
/* /*
* Perform the conversion. * Create a new string object and string buffer
* (-1 because of extra separator included in string_length from above) * (-1 because of extra separator included in string_length from above)
*/ */
string_length--;
if (string_length > ACPI_MAX_STRING_CONVERSION) { /* ACPI limit */
return_ACPI_STATUS(AE_AML_STRING_LIMIT);
}
/* Create a new string object and string buffer */
return_desc = return_desc =
acpi_ut_create_string_object((acpi_size) string_length); acpi_ut_create_string_object((acpi_size)
(string_length - 1));
if (!return_desc) { if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY); return_ACPI_STATUS(AE_NO_MEMORY);
} }
...@@ -647,7 +641,9 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type, ...@@ -647,7 +641,9 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
break; break;
default: default:
ACPI_REPORT_ERROR(("Bad destination type during conversion: %X\n", destination_type)); ACPI_ERROR((AE_INFO,
"Bad destination type during conversion: %X",
destination_type));
status = AE_AML_INTERNAL; status = AE_AML_INTERNAL;
break; break;
} }
...@@ -660,16 +656,12 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type, ...@@ -660,16 +656,12 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
break; break;
default: default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Unknown Target type ID 0x%X Op %s dest_type %s\n", "Unknown Target type ID 0x%X aml_opcode %X dest_type %s",
GET_CURRENT_ARG_TYPE(walk_state->op_info-> GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args), runtime_args),
walk_state->op_info->name, walk_state->opcode,
acpi_ut_get_type_name(destination_type))); acpi_ut_get_type_name(destination_type)));
ACPI_REPORT_ERROR(("Bad Target Type (ARGI): %X\n",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args)))
status = AE_AML_INTERNAL; status = AE_AML_INTERNAL;
} }
......
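The exconvrt.c hunk above folds the "string_length - 1" adjustment directly into the acpi_ut_create_string_object() call; the comment notes that the computed length counts one separator per source byte, including one after the final byte that is never emitted. The stand-alone helper below (hypothetical, not the ACPICA converter) shows the same accounting for a hex-dump style conversion.

        /*
         * Hypothetical helper: n bytes render as "XX," (3 chars each), and the
         * separator slot after the last byte becomes the terminating NUL,
         * which is why the allocation is string_length rather than +1.
         */
        #include <stdio.h>
        #include <stdlib.h>

        static char *buffer_to_hex_string(const unsigned char *buf, size_t n)
        {
                size_t string_length = 3 * n;   /* "XX," per source byte */
                char *s, *p;
                size_t i;

                if (n == 0)
                        return calloc(1, 1);    /* empty string */

                s = malloc(string_length);      /* 3*n - 1 chars plus one NUL */
                if (!s)
                        return NULL;

                p = s;
                for (i = 0; i < n; i++)
                        p += sprintf(p, (i + 1 < n) ? "%02X," : "%02X", buf[i]);

                return s;
        }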
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -300,7 +300,7 @@ acpi_ex_create_region(u8 * aml_start, ...@@ -300,7 +300,7 @@ acpi_ex_create_region(u8 * aml_start,
*/ */
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) && if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
(region_space < ACPI_USER_REGION_BEGIN)) { (region_space < ACPI_USER_REGION_BEGIN)) {
ACPI_REPORT_ERROR(("Invalid address_space type %X\n", ACPI_ERROR((AE_INFO, "Invalid address_space type %X",
region_space)); region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
} }
......
(1 file diff collapsed in this view)
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -249,13 +249,18 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, ...@@ -249,13 +249,18 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
* Source must be a buffer of sufficient size (ACPI_SMBUS_BUFFER_SIZE). * Source must be a buffer of sufficient size (ACPI_SMBUS_BUFFER_SIZE).
*/ */
if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) { if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) {
ACPI_REPORT_ERROR(("SMBus write requires Buffer, found type %s\n", acpi_ut_get_object_type_name(source_desc))); ACPI_ERROR((AE_INFO,
"SMBus write requires Buffer, found type %s",
acpi_ut_get_object_type_name(source_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE); return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
} }
if (source_desc->buffer.length < ACPI_SMBUS_BUFFER_SIZE) { if (source_desc->buffer.length < ACPI_SMBUS_BUFFER_SIZE) {
ACPI_REPORT_ERROR(("SMBus write requires Buffer of length %X, found length %X\n", ACPI_SMBUS_BUFFER_SIZE, source_desc->buffer.length)); ACPI_ERROR((AE_INFO,
"SMBus write requires Buffer of length %X, found length %X",
ACPI_SMBUS_BUFFER_SIZE,
source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT); return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
} }
......
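The exfield.c hunk above reworks the error reporting around two preconditions for an SMBus field write: the source operand must be a Buffer object, and that buffer must be at least ACPI_SMBUS_BUFFER_SIZE bytes long. A simplified, hypothetical version of that check follows; the real constant's value is not shown in this diff, so the sketch takes it as a parameter.

        #include <stdbool.h>
        #include <stddef.h>

        enum obj_type { OBJ_INTEGER, OBJ_STRING, OBJ_BUFFER };

        struct aml_object {
                enum obj_type type;
                size_t        buffer_length;
        };

        /* Returns true when the object may be written to an SMBus field. */
        static bool smbus_write_source_ok(const struct aml_object *src,
                                          size_t smbus_buffer_size)
        {
                if (src->type != OBJ_BUFFER)
                        return false;   /* "SMBus write requires Buffer" */

                /* "requires Buffer of length X, found length Y" */
                return src->buffer_length >= smbus_buffer_size;
        }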
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -94,8 +94,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, ...@@ -94,8 +94,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
/* We must have a valid region */ /* We must have a valid region */
if (ACPI_GET_OBJECT_TYPE(rgn_desc) != ACPI_TYPE_REGION) { if (ACPI_GET_OBJECT_TYPE(rgn_desc) != ACPI_TYPE_REGION) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)",
"Needed Region, found type %X (%s)\n",
ACPI_GET_OBJECT_TYPE(rgn_desc), ACPI_GET_OBJECT_TYPE(rgn_desc),
acpi_ut_get_object_type_name(rgn_desc))); acpi_ut_get_object_type_name(rgn_desc)));
...@@ -162,15 +161,13 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, ...@@ -162,15 +161,13 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* than the region itself. For example, a region of length one * than the region itself. For example, a region of length one
* byte, and a field with Dword access specified. * byte, and a field with Dword access specified.
*/ */
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)\n", "Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)",
acpi_ut_get_node_name(obj_desc-> acpi_ut_get_node_name(obj_desc->
common_field. common_field.node),
obj_desc->common_field.access_byte_width,
acpi_ut_get_node_name(rgn_desc->region.
node), node),
obj_desc->common_field.
access_byte_width,
acpi_ut_get_node_name(rgn_desc->
region.node),
rgn_desc->region.length)); rgn_desc->region.length));
} }
...@@ -178,10 +175,9 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, ...@@ -178,10 +175,9 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* Offset rounded up to next multiple of field width * Offset rounded up to next multiple of field width
* exceeds region length, indicate an error * exceeds region length, indicate an error
*/ */
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)\n", "Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)",
acpi_ut_get_node_name(obj_desc->common_field. acpi_ut_get_node_name(obj_desc->common_field.node),
node),
obj_desc->common_field.base_byte_offset, obj_desc->common_field.base_byte_offset,
field_datum_byte_offset, field_datum_byte_offset,
obj_desc->common_field.access_byte_width, obj_desc->common_field.access_byte_width,
...@@ -270,16 +266,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc, ...@@ -270,16 +266,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) { if (status == AE_NOT_IMPLEMENTED) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Region %s(%X) not implemented\n", "Region %s(%X) not implemented",
acpi_ut_get_region_name(rgn_desc-> acpi_ut_get_region_name(rgn_desc->region.
region.
space_id), space_id),
rgn_desc->region.space_id)); rgn_desc->region.space_id));
} else if (status == AE_NOT_EXIST) { } else if (status == AE_NOT_EXIST) {
ACPI_REPORT_ERROR(("Region %s(%X) has no handler\n", ACPI_ERROR((AE_INFO,
acpi_ut_get_region_name(rgn_desc-> "Region %s(%X) has no handler",
region. acpi_ut_get_region_name(rgn_desc->region.
space_id), space_id),
rgn_desc->region.space_id)); rgn_desc->region.space_id));
} }
...@@ -514,7 +509,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, ...@@ -514,7 +509,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
default: default:
ACPI_REPORT_ERROR(("Wrong object type in field I/O %X\n", ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X",
ACPI_GET_OBJECT_TYPE(obj_desc))); ACPI_GET_OBJECT_TYPE(obj_desc)));
status = AE_AML_INTERNAL; status = AE_AML_INTERNAL;
break; break;
...@@ -618,8 +613,8 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc, ...@@ -618,8 +613,8 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
default: default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"write_with_update_rule: Unknown update_rule setting: %X\n", "Unknown update_rule value: %X",
(obj_desc->common_field. (obj_desc->common_field.
field_flags & field_flags &
AML_FIELD_UPDATE_RULE_MASK))); AML_FIELD_UPDATE_RULE_MASK)));
...@@ -677,10 +672,9 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc, ...@@ -677,10 +672,9 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
if (buffer_length < if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) { ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Field size %X (bits) is too large for buffer (%X)\n", "Field size %X (bits) is too large for buffer (%X)",
obj_desc->common_field.bit_length, obj_desc->common_field.bit_length, buffer_length));
buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW); return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
} }
...@@ -792,10 +786,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, ...@@ -792,10 +786,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
if (buffer_length < if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) { ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ACPI_ERROR((AE_INFO,
"Field size %X (bits) is too large for buffer (%X)\n", "Field size %X (bits) is too large for buffer (%X)",
obj_desc->common_field.bit_length, obj_desc->common_field.bit_length, buffer_length));
buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW); return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
} }
......
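Both exfldio.c error paths above report the same class of problem: the access window implied by a field's base byte offset, current datum offset, and access byte width must lie entirely inside its operation region. A minimal sketch of that rule, using simplified stand-in types rather than the real ACPICA objects:

        #include <stdbool.h>

        struct op_region { unsigned int length; };

        struct reg_field {
                unsigned int base_byte_offset;
                unsigned int access_byte_width;
        };

        /* "Base + Offset + Width" must not pass the end of the region. */
        static bool field_access_fits(const struct reg_field *f,
                                      unsigned int field_datum_byte_offset,
                                      const struct op_region *r)
        {
                return f->base_byte_offset + field_datum_byte_offset +
                       f->access_byte_width <= r->length;
        }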
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include <acpi/acpi.h> #include <acpi/acpi.h>
#include <acpi/acinterp.h> #include <acpi/acinterp.h>
#include <acpi/amlcode.h> #include <acpi/amlcode.h>
#include <acpi/amlresrc.h>
#define _COMPONENT ACPI_EXECUTER #define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exmisc") ACPI_MODULE_NAME("exmisc")
...@@ -97,7 +98,8 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc, ...@@ -97,7 +98,8 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
default: default:
ACPI_REPORT_ERROR(("Unknown Reference opcode in get_reference %X\n", obj_desc->reference.opcode)); ACPI_ERROR((AE_INFO, "Unknown Reference opcode %X",
obj_desc->reference.opcode));
return_ACPI_STATUS(AE_AML_INTERNAL); return_ACPI_STATUS(AE_AML_INTERNAL);
} }
break; break;
...@@ -112,7 +114,8 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc, ...@@ -112,7 +114,8 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
default: default:
ACPI_REPORT_ERROR(("Invalid descriptor type in get_reference: %X\n", ACPI_GET_DESCRIPTOR_TYPE(obj_desc))); ACPI_ERROR((AE_INFO, "Invalid descriptor type %X",
ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
return_ACPI_STATUS(AE_TYPE); return_ACPI_STATUS(AE_TYPE);
} }
...@@ -157,48 +160,65 @@ acpi_ex_concat_template(union acpi_operand_object *operand0, ...@@ -157,48 +160,65 @@ acpi_ex_concat_template(union acpi_operand_object *operand0,
union acpi_operand_object **actual_return_desc, union acpi_operand_object **actual_return_desc,
struct acpi_walk_state *walk_state) struct acpi_walk_state *walk_state)
{ {
acpi_status status;
union acpi_operand_object *return_desc; union acpi_operand_object *return_desc;
u8 *new_buf; u8 *new_buf;
u8 *end_tag1; u8 *end_tag;
u8 *end_tag2; acpi_size length0;
acpi_size length1; acpi_size length1;
acpi_size length2; acpi_size new_length;
ACPI_FUNCTION_TRACE("ex_concat_template"); ACPI_FUNCTION_TRACE("ex_concat_template");
/* Find the end_tags in each resource template */ /*
* Find the end_tag descriptor in each resource template.
* Note1: returned pointers point TO the end_tag, not past it.
* Note2: zero-length buffers are allowed; treated like one end_tag
*/
end_tag1 = acpi_ut_get_resource_end_tag(operand0); /* Get the length of the first resource template */
end_tag2 = acpi_ut_get_resource_end_tag(operand1);
if (!end_tag1 || !end_tag2) { status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
return_ACPI_STATUS(AE_AML_OPERAND_TYPE); if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
} }
/* Compute the length of each part */ length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);
/* Get the length of the second resource template */
status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
length1 = ACPI_PTR_DIFF(end_tag1, operand0->buffer.pointer); length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);
length2 = ACPI_PTR_DIFF(end_tag2, operand1->buffer.pointer) + 2; /* Size of END_TAG */
/* Create a new buffer object for the result */ /* Combine both lengths, minimum size will be 2 for end_tag */
return_desc = acpi_ut_create_buffer_object(length1 + length2); new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);
/* Create a new buffer object for the result (with one end_tag) */
return_desc = acpi_ut_create_buffer_object(new_length);
if (!return_desc) { if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY); return_ACPI_STATUS(AE_NO_MEMORY);
} }
/* Copy the templates to the new descriptor */ /*
* Copy the templates to the new buffer, 0 first, then 1 follows. One
* end_tag descriptor is copied from Operand1.
*/
new_buf = return_desc->buffer.pointer; new_buf = return_desc->buffer.pointer;
ACPI_MEMCPY(new_buf, operand0->buffer.pointer, length1); ACPI_MEMCPY(new_buf, operand0->buffer.pointer, length0);
ACPI_MEMCPY(new_buf + length1, operand1->buffer.pointer, length2); ACPI_MEMCPY(new_buf + length0, operand1->buffer.pointer, length1);
/* Compute the new checksum */ /* Insert end_tag and set the checksum to zero, means "ignore checksum" */
new_buf[return_desc->buffer.length - 1] = new_buf[new_length - 1] = 0;
acpi_ut_generate_checksum(return_desc->buffer.pointer, new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;
(return_desc->buffer.length - 1));
/* Return the completed template descriptor */ /* Return the completed resource template */
*actual_return_desc = return_desc; *actual_return_desc = return_desc;
return_ACPI_STATUS(AE_OK); return_ACPI_STATUS(AE_OK);
...@@ -229,7 +249,6 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, ...@@ -229,7 +249,6 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
union acpi_operand_object *return_desc; union acpi_operand_object *return_desc;
char *new_buf; char *new_buf;
acpi_status status; acpi_status status;
acpi_size new_length;
ACPI_FUNCTION_TRACE("ex_do_concatenate"); ACPI_FUNCTION_TRACE("ex_do_concatenate");
...@@ -256,7 +275,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, ...@@ -256,7 +275,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
break; break;
default: default:
ACPI_REPORT_ERROR(("Concat - invalid obj type: %X\n", ACPI_ERROR((AE_INFO, "Invalid object type: %X",
ACPI_GET_OBJECT_TYPE(operand0))); ACPI_GET_OBJECT_TYPE(operand0)));
status = AE_AML_INTERNAL; status = AE_AML_INTERNAL;
} }
...@@ -296,8 +315,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, ...@@ -296,8 +315,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Copy the first integer, LSB first */ /* Copy the first integer, LSB first */
ACPI_MEMCPY(new_buf, ACPI_MEMCPY(new_buf, &operand0->integer.value,
&operand0->integer.value,
acpi_gbl_integer_byte_width); acpi_gbl_integer_byte_width);
/* Copy the second integer (LSB first) after the first */ /* Copy the second integer (LSB first) after the first */
...@@ -311,14 +329,11 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, ...@@ -311,14 +329,11 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Result of two Strings is a String */ /* Result of two Strings is a String */
new_length = (acpi_size) operand0->string.length + return_desc = acpi_ut_create_string_object((acpi_size)
(acpi_size) local_operand1->string.length; (operand0->string.
if (new_length > ACPI_MAX_STRING_CONVERSION) { length +
status = AE_AML_STRING_LIMIT; local_operand1->
goto cleanup; string.length));
}
return_desc = acpi_ut_create_string_object(new_length);
if (!return_desc) { if (!return_desc) {
status = AE_NO_MEMORY; status = AE_NO_MEMORY;
goto cleanup; goto cleanup;
...@@ -338,11 +353,10 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, ...@@ -338,11 +353,10 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Result of two Buffers is a Buffer */ /* Result of two Buffers is a Buffer */
return_desc = acpi_ut_create_buffer_object((acpi_size) return_desc = acpi_ut_create_buffer_object((acpi_size)
operand0->buffer. (operand0->buffer.
length + length +
(acpi_size)
local_operand1-> local_operand1->
buffer.length); buffer.length));
if (!return_desc) { if (!return_desc) {
status = AE_NO_MEMORY; status = AE_NO_MEMORY;
goto cleanup; goto cleanup;
...@@ -352,8 +366,8 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, ...@@ -352,8 +366,8 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Concatenate the buffers */ /* Concatenate the buffers */
ACPI_MEMCPY(new_buf, ACPI_MEMCPY(new_buf, operand0->buffer.pointer,
operand0->buffer.pointer, operand0->buffer.length); operand0->buffer.length);
ACPI_MEMCPY(new_buf + operand0->buffer.length, ACPI_MEMCPY(new_buf + operand0->buffer.length,
local_operand1->buffer.pointer, local_operand1->buffer.pointer,
local_operand1->buffer.length); local_operand1->buffer.length);
...@@ -363,7 +377,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, ...@@ -363,7 +377,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Invalid object type, should not happen here */ /* Invalid object type, should not happen here */
ACPI_REPORT_ERROR(("Concatenate - Invalid object type: %X\n", ACPI_ERROR((AE_INFO, "Invalid object type: %X",
ACPI_GET_OBJECT_TYPE(operand0))); ACPI_GET_OBJECT_TYPE(operand0)));
status = AE_AML_INTERNAL; status = AE_AML_INTERNAL;
goto cleanup; goto cleanup;
...@@ -625,9 +639,8 @@ acpi_ex_do_logical_op(u16 opcode, ...@@ -625,9 +639,8 @@ acpi_ex_do_logical_op(u16 opcode,
/* Lexicographic compare: compare the data bytes */ /* Lexicographic compare: compare the data bytes */
compare = ACPI_MEMCMP((const char *)operand0->buffer.pointer, compare = ACPI_MEMCMP(operand0->buffer.pointer,
(const char *)local_operand1->buffer. local_operand1->buffer.pointer,
pointer,
(length0 > length1) ? length1 : length0); (length0 > length1) ? length1 : length0);
switch (opcode) { switch (opcode) {
......
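The rewritten acpi_ex_concat_template() above measures each template only up to (not including) its end_tag, concatenates the two bodies, and appends a single fresh end_tag whose checksum byte is zero ("ignore checksum"). The sketch below reproduces that buffer layout outside the interpreter, assuming the conventional one-byte small-resource END_TAG descriptor value (ACPI_RESOURCE_NAME_END_TAG | 1, i.e. 0x79); it is an illustration, not kernel code.

        #include <stddef.h>
        #include <stdlib.h>
        #include <string.h>

        #define END_TAG_BYTE   0x79    /* small-resource END_TAG descriptor */
        #define END_TAG_SIZE   2       /* tag byte + checksum byte */

        /* len0/len1 exclude each template's own trailing end_tag */
        static unsigned char *concat_templates(const unsigned char *t0, size_t len0,
                                               const unsigned char *t1, size_t len1,
                                               size_t *out_len)
        {
                size_t new_len = len0 + len1 + END_TAG_SIZE;
                unsigned char *buf = malloc(new_len);

                if (!buf)
                        return NULL;

                /* copy template 0 first, template 1 follows */
                memcpy(buf, t0, len0);
                memcpy(buf + len0, t1, len1);

                buf[new_len - 2] = END_TAG_BYTE;  /* one new end_tag descriptor */
                buf[new_len - 1] = 0;             /* checksum 0: "ignore checksum" */

                *out_len = new_len;
                return buf;
        }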
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -153,7 +153,9 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, ...@@ -153,7 +153,9 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Sanity check -- we must have a valid thread ID */ /* Sanity check -- we must have a valid thread ID */
if (!walk_state->thread) { if (!walk_state->thread) {
ACPI_REPORT_ERROR(("Cannot acquire Mutex [%4.4s], null thread info\n", acpi_ut_get_node_name(obj_desc->mutex.node))); ACPI_ERROR((AE_INFO,
"Cannot acquire Mutex [%4.4s], null thread info",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_INTERNAL); return_ACPI_STATUS(AE_AML_INTERNAL);
} }
...@@ -162,7 +164,9 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, ...@@ -162,7 +164,9 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
* mutex. This mechanism provides some deadlock prevention * mutex. This mechanism provides some deadlock prevention
*/ */
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) { if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
ACPI_REPORT_ERROR(("Cannot acquire Mutex [%4.4s], incorrect sync_level\n", acpi_ut_get_node_name(obj_desc->mutex.node))); ACPI_ERROR((AE_INFO,
"Cannot acquire Mutex [%4.4s], incorrect sync_level",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER); return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
} }
...@@ -237,14 +241,18 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, ...@@ -237,14 +241,18 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* The mutex must have been previously acquired in order to release it */ /* The mutex must have been previously acquired in order to release it */
if (!obj_desc->mutex.owner_thread) { if (!obj_desc->mutex.owner_thread) {
ACPI_REPORT_ERROR(("Cannot release Mutex [%4.4s], not acquired\n", acpi_ut_get_node_name(obj_desc->mutex.node))); ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], not acquired",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED); return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
} }
/* Sanity check -- we must have a valid thread ID */ /* Sanity check -- we must have a valid thread ID */
if (!walk_state->thread) { if (!walk_state->thread) {
ACPI_REPORT_ERROR(("Cannot release Mutex [%4.4s], null thread info\n", acpi_ut_get_node_name(obj_desc->mutex.node))); ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], null thread info",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_INTERNAL); return_ACPI_STATUS(AE_AML_INTERNAL);
} }
...@@ -255,7 +263,11 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, ...@@ -255,7 +263,11 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
if ((obj_desc->mutex.owner_thread->thread_id != if ((obj_desc->mutex.owner_thread->thread_id !=
walk_state->thread->thread_id) walk_state->thread->thread_id)
&& (obj_desc->mutex.semaphore != acpi_gbl_global_lock_semaphore)) { && (obj_desc->mutex.semaphore != acpi_gbl_global_lock_semaphore)) {
ACPI_REPORT_ERROR(("Thread %X cannot release Mutex [%4.4s] acquired by thread %X\n", walk_state->thread->thread_id, acpi_ut_get_node_name(obj_desc->mutex.node), obj_desc->mutex.owner_thread->thread_id)); ACPI_ERROR((AE_INFO,
"Thread %X cannot release Mutex [%4.4s] acquired by thread %X",
walk_state->thread->thread_id,
acpi_ut_get_node_name(obj_desc->mutex.node),
obj_desc->mutex.owner_thread->thread_id));
return_ACPI_STATUS(AE_AML_NOT_OWNER); return_ACPI_STATUS(AE_AML_NOT_OWNER);
} }
...@@ -264,7 +276,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, ...@@ -264,7 +276,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* equal to the current sync level * equal to the current sync level
*/ */
if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) { if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
ACPI_REPORT_ERROR(("Cannot release Mutex [%4.4s], incorrect sync_level\n", acpi_ut_get_node_name(obj_desc->mutex.node))); ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], incorrect sync_level",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER); return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
} }
......
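The exmutex.c checks above enforce ACPI's sync-level ordering rule: a thread may only acquire a mutex whose sync_level is at least its current level, and may only release one whose sync_level does not exceed it, and only if it owns that mutex. A simplified sketch of those three checks, with stand-in structures rather than the real walk-state and operand objects:

        struct aml_mutex  { unsigned char sync_level; int owner_thread; };
        struct aml_thread { unsigned char current_sync_level; int thread_id; };

        enum { AML_OK = 0, AML_MUTEX_ORDER = -1, AML_NOT_OWNER = -2 };

        static int mutex_acquire_check(struct aml_thread *t, struct aml_mutex *m)
        {
                /* "Cannot acquire Mutex, incorrect sync_level" */
                if (t->current_sync_level > m->sync_level)
                        return AML_MUTEX_ORDER;

                m->owner_thread = t->thread_id;
                t->current_sync_level = m->sync_level;
                return AML_OK;
        }

        static int mutex_release_check(struct aml_thread *t, struct aml_mutex *m)
        {
                /* "Thread X cannot release Mutex acquired by thread Y" */
                if (m->owner_thread != t->thread_id)
                        return AML_NOT_OWNER;

                /* "Cannot release Mutex, incorrect sync_level" */
                if (m->sync_level > t->current_sync_level)
                        return AML_MUTEX_ORDER;

                return AML_OK;
        }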
(3 file diffs collapsed in this view)
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -119,7 +119,8 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state) ...@@ -119,7 +119,8 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
default: default:
ACPI_REPORT_ERROR(("acpi_ex_opcode_3A_0T_0R: Unknown opcode %X\n", walk_state->opcode)); ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE; status = AE_AML_BAD_OPCODE;
goto cleanup; goto cleanup;
} }
...@@ -223,8 +224,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state) ...@@ -223,8 +224,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
goto cleanup; goto cleanup;
} }
if (length > 0) { if (buffer) {
/* Copy the portion requested */ /* We have a buffer, copy the portion requested */
ACPI_MEMCPY(buffer, operand[0]->string.pointer + index, ACPI_MEMCPY(buffer, operand[0]->string.pointer + index,
length); length);
...@@ -242,7 +243,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state) ...@@ -242,7 +243,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
default: default:
ACPI_REPORT_ERROR(("acpi_ex_opcode_3A_0T_0R: Unknown opcode %X\n", walk_state->opcode)); ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE; status = AE_AML_BAD_OPCODE;
goto cleanup; goto cleanup;
} }
......
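The middle exoparg3.c hunk changes the guard on the Mid() copy from "length > 0" to "buffer": the copy now keys off whether a target buffer actually exists, so a zero-length result (which may carry a NULL pointer) never reaches ACPI_MEMCPY. The hypothetical helper below shows the same guard in isolation.

        #include <stddef.h>
        #include <string.h>

        /* Extract up to 'length' bytes starting at 'index'; returns bytes copied. */
        static size_t aml_mid(const unsigned char *src, size_t src_len,
                              size_t index, size_t length,
                              unsigned char *buffer)
        {
                if (index >= src_len)
                        return 0;                  /* nothing to extract */

                if (length > src_len - index)
                        length = src_len - index;  /* clip to the source */

                if (buffer)                        /* "We have a buffer" */
                        memcpy(buffer, src + index, length);

                return length;
        }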
(8 file diffs collapsed in this view)
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2005, R. Byron Moore * Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -71,7 +71,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc, ...@@ -71,7 +71,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
/* We know that source_desc is a buffer by now */ /* We know that source_desc is a buffer by now */
buffer = (u8 *) source_desc->buffer.pointer; buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer);
length = source_desc->buffer.length; length = source_desc->buffer.length;
/* /*
...@@ -160,7 +160,7 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc, ...@@ -160,7 +160,7 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
/* We know that source_desc is a string by now */ /* We know that source_desc is a string by now */
buffer = (u8 *) source_desc->string.pointer; buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer);
length = source_desc->string.length; length = source_desc->string.length;
/* /*
......
(202 file diffs collapsed in this view)