Commit 3506ba7b authored by Linus Torvalds

Merge branch 'agp-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/agp-2.6

* 'agp-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/agp-2.6:
  agp/intel: cleanup some serious whitespace badness
  [AGP] intel_agp: Add support for Intel 4 series chipsets
  [AGP] intel_agp: extra stolen mem size available for IGD_GM chipset
  agp: more boolean conversions.
  drivers/char/agp - use bool
  agp: two-stage page destruction issue
  agp/via: fixup pci ids
@@ -99,8 +99,8 @@ struct agp_bridge_driver {
 const void *aperture_sizes;
 int num_aperture_sizes;
 enum aper_size_type size_type;
-int cant_use_aperture;
-int needs_scratch_page;
+bool cant_use_aperture;
+bool needs_scratch_page;
 const struct gatt_mask *masks;
 int (*fetch_size)(void);
 int (*configure)(void);
@@ -278,7 +278,7 @@ void agp_generic_destroy_page(void *addr, int flags);
 void agp_free_key(int key);
 int agp_num_entries(void);
 u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
-void agp_device_command(u32 command, int agp_v3);
+void agp_device_command(u32 command, bool agp_v3);
 int agp_3_5_enable(struct agp_bridge_data *bridge);
 void global_cache_flush(void);
 void get_agp_version(struct agp_bridge_data *bridge);
...
@@ -80,7 +80,7 @@ static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 agp->mode.bits.enable = 1;
 agp->ops->configure(agp);
-agp_device_command(agp->mode.lw, 0);
+agp_device_command(agp->mode.lw, false);
 }
 static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,
@@ -126,7 +126,7 @@ struct agp_bridge_driver alpha_core_agp_driver = {
 .aperture_sizes = alpha_core_agp_sizes,
 .num_aperture_sizes = 1,
 .size_type = FIXED_APER_SIZE,
-.cant_use_aperture = 1,
+.cant_use_aperture = true,
 .masks = NULL,
 .fetch_size = alpha_core_agp_fetch_size,
...
@@ -314,9 +314,9 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 j++;
 }
-if (mem->is_flushed == FALSE) {
+if (!mem->is_flushed) {
 global_cache_flush();
-mem->is_flushed = TRUE;
+mem->is_flushed = true;
 }
 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
...
@@ -90,9 +90,9 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 j++;
 }
-if (mem->is_flushed == FALSE) {
+if (!mem->is_flushed) {
 global_cache_flush();
-mem->is_flushed = TRUE;
+mem->is_flushed = true;
 }
 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
...
@@ -287,10 +287,10 @@ static int ati_insert_memory(struct agp_memory * mem,
 j++;
 }
-if (mem->is_flushed == FALSE) {
+if (!mem->is_flushed) {
 /*CACHE_FLUSH(); */
 global_cache_flush();
-mem->is_flushed = TRUE;
+mem->is_flushed = true;
 }
 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
...
@@ -188,10 +188,10 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 err_out:
 if (bridge->driver->needs_scratch_page) {
-bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
-AGP_PAGE_DESTROY_UNMAP);
-bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
-AGP_PAGE_DESTROY_FREE);
+void *va = gart_to_virt(bridge->scratch_page_real);
+bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
+bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
 }
 if (got_gatt)
 bridge->driver->free_gatt_table(bridge);
@@ -215,10 +215,10 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
 if (bridge->driver->agp_destroy_page &&
 bridge->driver->needs_scratch_page) {
-bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
-AGP_PAGE_DESTROY_UNMAP);
-bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
-AGP_PAGE_DESTROY_FREE);
+void *va = gart_to_virt(bridge->scratch_page_real);
+bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
+bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
 }
 }
...
@@ -214,7 +214,7 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 ret_val = -EINVAL;
 goto ioctl_out;
 }
-if ((agp_fe.backend_acquired != TRUE) &&
+if ((agp_fe.backend_acquired != true) &&
 (cmd != AGPIOC_ACQUIRE32)) {
 ret_val = -EBUSY;
 goto ioctl_out;
...
@@ -249,9 +249,9 @@ static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int t
 if (type != 0 || mem->type != 0)
 return -EINVAL;
-if (mem->is_flushed == FALSE) {
+if (!mem->is_flushed) {
 global_cache_flush();
-mem->is_flushed = TRUE;
+mem->is_flushed = true;
 }
 last_page = NULL;
@@ -329,7 +329,7 @@ static const struct agp_bridge_driver efficeon_driver = {
 .free_gatt_table = efficeon_free_gatt_table,
 .insert_memory = efficeon_insert_memory,
 .remove_memory = efficeon_remove_memory,
-.cant_use_aperture = 0, // 1 might be faster?
+.cant_use_aperture = false, // true might be faster?
 // Generic
 .alloc_by_type = agp_generic_alloc_by_type,
...
@@ -395,7 +395,7 @@ static int agp_remove_controller(struct agp_controller *controller)
 if (agp_fe.current_controller == controller) {
 agp_fe.current_controller = NULL;
-agp_fe.backend_acquired = FALSE;
+agp_fe.backend_acquired = false;
 agp_backend_release(agp_bridge);
 }
 kfree(controller);
@@ -443,7 +443,7 @@ static void agp_controller_release_current(struct agp_controller *controller,
 }
 agp_fe.current_controller = NULL;
-agp_fe.used_by_controller = FALSE;
+agp_fe.used_by_controller = false;
 agp_backend_release(agp_bridge);
 }
@@ -573,7 +573,7 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma)
 mutex_lock(&(agp_fe.agp_mutex));
-if (agp_fe.backend_acquired != TRUE)
+if (agp_fe.backend_acquired != true)
 goto out_eperm;
 if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
@@ -768,7 +768,7 @@ int agpioc_acquire_wrap(struct agp_file_private *priv)
 atomic_inc(&agp_bridge->agp_in_use);
-agp_fe.backend_acquired = TRUE;
+agp_fe.backend_acquired = true;
 controller = agp_find_controller_by_pid(priv->my_pid);
@@ -778,7 +778,7 @@ int agpioc_acquire_wrap(struct agp_file_private *priv)
 controller = agp_create_controller(priv->my_pid);
 if (controller == NULL) {
-agp_fe.backend_acquired = FALSE;
+agp_fe.backend_acquired = false;
 agp_backend_release(agp_bridge);
 return -ENOMEM;
 }
@@ -981,7 +981,7 @@ static long agp_ioctl(struct file *file,
 ret_val = -EINVAL;
 goto ioctl_out;
 }
-if ((agp_fe.backend_acquired != TRUE) &&
+if ((agp_fe.backend_acquired != true) &&
 (cmd != AGPIOC_ACQUIRE)) {
 ret_val = -EBUSY;
 goto ioctl_out;
...
@@ -96,13 +96,13 @@ EXPORT_SYMBOL(agp_flush_chipset);
 void agp_alloc_page_array(size_t size, struct agp_memory *mem)
 {
 mem->memory = NULL;
-mem->vmalloc_flag = 0;
+mem->vmalloc_flag = false;
 if (size <= 2*PAGE_SIZE)
 mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
 if (mem->memory == NULL) {
 mem->memory = vmalloc(size);
-mem->vmalloc_flag = 1;
+mem->vmalloc_flag = true;
 }
 }
 EXPORT_SYMBOL(agp_alloc_page_array);
@@ -188,7 +188,7 @@ void agp_free_memory(struct agp_memory *curr)
 if (curr == NULL)
 return;
-if (curr->is_bound == TRUE)
+if (curr->is_bound)
 agp_unbind_memory(curr);
 if (curr->type >= AGP_USER_TYPES) {
@@ -202,10 +202,13 @@ void agp_free_memory(struct agp_memory *curr)
 }
 if (curr->page_count != 0) {
 for (i = 0; i < curr->page_count; i++) {
-curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP);
+curr->memory[i] = (unsigned long)gart_to_virt(curr->memory[i]);
+curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
+AGP_PAGE_DESTROY_UNMAP);
 }
 for (i = 0; i < curr->page_count; i++) {
-curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE);
+curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
+AGP_PAGE_DESTROY_FREE);
 }
 }
 agp_free_key(curr->key);
@@ -411,20 +414,20 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
 if (curr == NULL)
 return -EINVAL;
-if (curr->is_bound == TRUE) {
+if (curr->is_bound) {
 printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
 return -EINVAL;
 }
-if (curr->is_flushed == FALSE) {
+if (!curr->is_flushed) {
 curr->bridge->driver->cache_flush();
-curr->is_flushed = TRUE;
+curr->is_flushed = true;
 }
 ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 if (ret_val != 0)
 return ret_val;
-curr->is_bound = TRUE;
+curr->is_bound = true;
 curr->pg_start = pg_start;
 return 0;
 }
@@ -446,7 +449,7 @@ int agp_unbind_memory(struct agp_memory *curr)
 if (curr == NULL)
 return -EINVAL;
-if (curr->is_bound != TRUE) {
+if (!curr->is_bound) {
 printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
 return -EINVAL;
 }
@@ -456,7 +459,7 @@ int agp_unbind_memory(struct agp_memory *curr)
 if (ret_val != 0)
 return ret_val;
-curr->is_bound = FALSE;
+curr->is_bound = false;
 curr->pg_start = 0;
 return 0;
 }
@@ -754,7 +757,7 @@ u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode
 EXPORT_SYMBOL(agp_collect_device_status);
-void agp_device_command(u32 bridge_agpstat, int agp_v3)
+void agp_device_command(u32 bridge_agpstat, bool agp_v3)
 {
 struct pci_dev *device = NULL;
 int mode;
@@ -818,7 +821,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 /* If we have 3.5, we can do the isoch stuff. */
 if (bridge->minor_version >= 5)
 agp_3_5_enable(bridge);
-agp_device_command(bridge_agpstat, TRUE);
+agp_device_command(bridge_agpstat, true);
 return;
 } else {
 /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
@@ -835,7 +838,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 }
 /* AGP v<3 */
-agp_device_command(bridge_agpstat, FALSE);
+agp_device_command(bridge_agpstat, false);
 }
 EXPORT_SYMBOL(agp_generic_enable);
@@ -1083,9 +1086,9 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 j++;
 }
-if (mem->is_flushed == FALSE) {
+if (!mem->is_flushed) {
 bridge->driver->cache_flush();
-mem->is_flushed = TRUE;
+mem->is_flushed = true;
 }
 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
...
@@ -353,9 +353,9 @@ hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
 j++;
 }
-if (mem->is_flushed == FALSE) {
+if (!mem->is_flushed) {
 global_cache_flush();
-mem->is_flushed = TRUE;
+mem->is_flushed = true;
 }
 for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
@@ -437,7 +437,7 @@ const struct agp_bridge_driver hp_zx1_driver = {
 .agp_alloc_page = agp_generic_alloc_page,
 .agp_destroy_page = agp_generic_destroy_page,
 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-.cant_use_aperture = 1,
+.cant_use_aperture = true,
 };
 static int __init
...
@@ -580,7 +580,7 @@ const struct agp_bridge_driver intel_i460_driver = {
 .alloc_by_type = agp_generic_alloc_by_type,
 .free_by_type = agp_generic_free_by_type,
 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-.cant_use_aperture = 1,
+.cant_use_aperture = true,
 };
 static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
...
@@ -34,6 +34,12 @@
 #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
 #define PCI_DEVICE_ID_INTEL_IGD_HB 0x2A40
 #define PCI_DEVICE_ID_INTEL_IGD_IG 0x2A42
+#define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00
+#define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02
+#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
+#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
+#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
+#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -55,6 +61,10 @@
 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
+agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
+agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB)
 extern int agp_memory_reserved;
@@ -80,8 +90,13 @@ extern int agp_memory_reserved;
 #define I915_PTEADDR 0x1C
 #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
 #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
 #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
 #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
 #define I915_IFPADDR 0x60
 /* Intel 965G registers */
@@ -325,7 +340,7 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 out:
 ret = 0;
 out_err:
-mem->is_flushed = 1;
+mem->is_flushed = true;
 return ret;
 }
@@ -418,9 +433,11 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
 if (curr->page_count == 4)
 i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
 else {
-agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
+void *va = gart_to_virt(curr->memory[0]);
+agp_bridge->driver->agp_destroy_page(va,
 AGP_PAGE_DESTROY_UNMAP);
-agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
+agp_bridge->driver->agp_destroy_page(va,
 AGP_PAGE_DESTROY_FREE);
 }
 agp_free_page_array(curr);
@@ -504,6 +521,10 @@ static void intel_i830_init_gtt_entries(void)
 size = 512;
 }
 size += 4;
+} else if (IS_G4X) {
+/* On 4 series hardware, GTT stolen is separate from graphics
+ * stolen, ignore it in stolen gtt entries counting */
+size = 0;
 } else {
 /* On previous hardware, the GTT size was just what was
 * required to map the aperture.
@@ -552,30 +573,54 @@ static void intel_i830_init_gtt_entries(void)
 break;
 case I915_GMCH_GMS_STOLEN_48M:
 /* Check it's really I915G */
-if (IS_I915 || IS_I965 || IS_G33)
+if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
 gtt_entries = MB(48) - KB(size);
 else
 gtt_entries = 0;
 break;
 case I915_GMCH_GMS_STOLEN_64M:
 /* Check it's really I915G */
-if (IS_I915 || IS_I965 || IS_G33)
+if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
 gtt_entries = MB(64) - KB(size);
 else
 gtt_entries = 0;
 break;
 case G33_GMCH_GMS_STOLEN_128M:
-if (IS_G33)
+if (IS_G33 || IS_I965 || IS_G4X)
 gtt_entries = MB(128) - KB(size);
 else
 gtt_entries = 0;
 break;
 case G33_GMCH_GMS_STOLEN_256M:
-if (IS_G33)
+if (IS_G33 || IS_I965 || IS_G4X)
 gtt_entries = MB(256) - KB(size);
 else
 gtt_entries = 0;
 break;
+case INTEL_GMCH_GMS_STOLEN_96M:
+if (IS_I965 || IS_G4X)
+gtt_entries = MB(96) - KB(size);
+else
+gtt_entries = 0;
+break;
+case INTEL_GMCH_GMS_STOLEN_160M:
+if (IS_I965 || IS_G4X)
+gtt_entries = MB(160) - KB(size);
+else
+gtt_entries = 0;
+break;
+case INTEL_GMCH_GMS_STOLEN_224M:
+if (IS_I965 || IS_G4X)
+gtt_entries = MB(224) - KB(size);
+else
+gtt_entries = 0;
+break;
+case INTEL_GMCH_GMS_STOLEN_352M:
+if (IS_I965 || IS_G4X)
+gtt_entries = MB(352) - KB(size);
+else
+gtt_entries = 0;
+break;
 default:
 gtt_entries = 0;
 break;
@@ -793,7 +838,7 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
 out:
 ret = 0;
 out_err:
-mem->is_flushed = 1;
+mem->is_flushed = true;
 return ret;
 }
@@ -1020,7 +1065,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
 out:
 ret = 0;
 out_err:
-mem->is_flushed = 1;
+mem->is_flushed = true;
 return ret;
 }
@@ -1134,53 +1179,64 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
 return addr | bridge->driver->masks[type].mask;
 }
+static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
+{
+switch (agp_bridge->dev->device) {
+case PCI_DEVICE_ID_INTEL_IGD_HB:
+case PCI_DEVICE_ID_INTEL_IGD_E_HB:
+case PCI_DEVICE_ID_INTEL_Q45_HB:
+case PCI_DEVICE_ID_INTEL_G45_HB:
+*gtt_offset = *gtt_size = MB(2);
+break;
+default:
+*gtt_offset = *gtt_size = KB(512);
+}
+}
 /* The intel i965 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for in the GTT.
 */
 static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
 {
 int page_order;
 struct aper_size_info_fixed *size;
 int num_entries;
 u32 temp;
 int gtt_offset, gtt_size;
 size = agp_bridge->current_size;
 page_order = size->page_order;
 num_entries = size->num_entries;
 agp_bridge->gatt_table_real = NULL;
 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
 temp &= 0xfff00000;
-if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB)
-gtt_offset = gtt_size = MB(2);
-else
-gtt_offset = gtt_size = KB(512);
+intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
 intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
 if (!intel_private.gtt)
 return -ENOMEM;
 intel_private.registers = ioremap(temp, 128 * 4096);
 if (!intel_private.registers) {
 iounmap(intel_private.gtt);
 return -ENOMEM;
 }
 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
 global_cache_flush(); /* FIXME: ? */
 /* we have to call this as early as possible after the MMIO base address is known */
 intel_i830_init_gtt_entries();
 agp_bridge->gatt_table = NULL;
 agp_bridge->gatt_bus_addr = temp;
 return 0;
 }
@@ -1656,7 +1712,7 @@ static const struct agp_bridge_driver intel_810_driver = {
 .aperture_sizes = intel_i810_sizes,
 .size_type = FIXED_APER_SIZE,
 .num_aperture_sizes = 2,
-.needs_scratch_page = TRUE,
+.needs_scratch_page = true,
 .configure = intel_i810_configure,
 .fetch_size = intel_i810_fetch_size,
 .cleanup = intel_i810_cleanup,
@@ -1697,7 +1753,7 @@ static const struct agp_bridge_driver intel_815_driver = {
 .free_by_type = agp_generic_free_by_type,
 .agp_alloc_page = agp_generic_alloc_page,
 .agp_destroy_page = agp_generic_destroy_page,
 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
 };
 static const struct agp_bridge_driver intel_830_driver = {
@@ -1705,7 +1761,7 @@ static const struct agp_bridge_driver intel_830_driver = {
 .aperture_sizes = intel_i830_sizes,
 .size_type = FIXED_APER_SIZE,
 .num_aperture_sizes = 4,
-.needs_scratch_page = TRUE,
+.needs_scratch_page = true,
 .configure = intel_i830_configure,
 .fetch_size = intel_i830_fetch_size,
 .cleanup = intel_i830_cleanup,
@@ -1876,7 +1932,7 @@ static const struct agp_bridge_driver intel_915_driver = {
 .aperture_sizes = intel_i830_sizes,
 .size_type = FIXED_APER_SIZE,
 .num_aperture_sizes = 4,
-.needs_scratch_page = TRUE,
+.needs_scratch_page = true,
 .configure = intel_i915_configure,
 .fetch_size = intel_i9xx_fetch_size,
 .cleanup = intel_i915_cleanup,
@@ -1898,28 +1954,26 @@ static const struct agp_bridge_driver intel_915_driver = {
 };
 static const struct agp_bridge_driver intel_i965_driver = {
 .owner = THIS_MODULE,
 .aperture_sizes = intel_i830_sizes,
 .size_type = FIXED_APER_SIZE,
 .num_aperture_sizes = 4,
-.needs_scratch_page = TRUE,
-.configure = intel_i915_configure,
-.fetch_size = intel_i9xx_fetch_size,
-.cleanup = intel_i915_cleanup,
-.tlb_flush = intel_i810_tlbflush,
-.mask_memory = intel_i965_mask_memory,
-.masks = intel_i810_masks,
-.agp_enable = intel_i810_agp_enable,
-.cache_flush = global_cache_flush,
-.create_gatt_table = intel_i965_create_gatt_table,
-.free_gatt_table = intel_i830_free_gatt_table,
-.insert_memory = intel_i915_insert_entries,
-.remove_memory = intel_i915_remove_entries,
-.alloc_by_type = intel_i830_alloc_by_type,
-.free_by_type = intel_i810_free_by_type,
-.agp_alloc_page = agp_generic_alloc_page,
-.agp_destroy_page = agp_generic_destroy_page,
-.agp_type_to_mask_type = intel_i830_type_to_mask_type,
+.needs_scratch_page = true,
+.cleanup = intel_i915_cleanup,
+.tlb_flush = intel_i810_tlbflush,
+.mask_memory = intel_i965_mask_memory,
+.masks = intel_i810_masks,
+.agp_enable = intel_i810_agp_enable,
+.cache_flush = global_cache_flush,
+.create_gatt_table = intel_i965_create_gatt_table,
+.free_gatt_table = intel_i830_free_gatt_table,
+.insert_memory = intel_i915_insert_entries,
+.remove_memory = intel_i915_remove_entries,
+.alloc_by_type = intel_i830_alloc_by_type,
+.free_by_type = intel_i810_free_by_type,
+.agp_alloc_page = agp_generic_alloc_page,
+.agp_destroy_page = agp_generic_destroy_page,
+.agp_type_to_mask_type = intel_i830_type_to_mask_type,
 .chipset_flush = intel_i915_chipset_flush,
 };
@@ -1948,28 +2002,28 @@ static const struct agp_bridge_driver intel_7505_driver = {
 };
 static const struct agp_bridge_driver intel_g33_driver = {
 .owner = THIS_MODULE,
 .aperture_sizes = intel_i830_sizes,
 .size_type = FIXED_APER_SIZE,
 .num_aperture_sizes = 4,
-.needs_scratch_page = TRUE,
+.needs_scratch_page = true,
 .configure = intel_i915_configure,
 .fetch_size = intel_i9xx_fetch_size,
 .cleanup = intel_i915_cleanup,
 .tlb_flush = intel_i810_tlbflush,
 .mask_memory = intel_i965_mask_memory,
 .masks = intel_i810_masks,
 .agp_enable = intel_i810_agp_enable,
 .cache_flush = global_cache_flush,
 .create_gatt_table = intel_i915_create_gatt_table,
 .free_gatt_table = intel_i830_free_gatt_table,
 .insert_memory = intel_i915_insert_entries,
 .remove_memory = intel_i915_remove_entries,
 .alloc_by_type = intel_i830_alloc_by_type,
 .free_by_type = intel_i810_free_by_type,
 .agp_alloc_page = agp_generic_alloc_page,
 .agp_destroy_page = agp_generic_destroy_page,
 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
 .chipset_flush = intel_i915_chipset_flush,
 };
@@ -2063,6 +2117,12 @@ static const struct intel_driver_description {
 NULL, &intel_g33_driver },
 { PCI_DEVICE_ID_INTEL_IGD_HB, PCI_DEVICE_ID_INTEL_IGD_IG, 0,
 "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+{ PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
+"Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
+"Q45/Q43", NULL, &intel_i965_driver },
+{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
+"G45/G43", NULL, &intel_i965_driver },
 { 0, 0, 0, NULL, NULL, NULL }
 };
@@ -2254,6 +2314,9 @@ static struct pci_device_id agp_intel_pci_table[] = {
 ID(PCI_DEVICE_ID_INTEL_Q35_HB),
 ID(PCI_DEVICE_ID_INTEL_Q33_HB),
 ID(PCI_DEVICE_ID_INTEL_IGD_HB),
+ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
+ID(PCI_DEVICE_ID_INTEL_Q45_HB),
+ID(PCI_DEVICE_ID_INTEL_G45_HB),
 { }
 };
...
@@ -214,9 +214,9 @@ static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type
 return -EBUSY;
 }
-if (mem->is_flushed == FALSE) {
+if (!mem->is_flushed) {
 global_cache_flush();
-mem->is_flushed = TRUE;
+mem->is_flushed = true;
 }
 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 writel(agp_bridge->driver->mask_memory(agp_bridge,
...
@@ -141,9 +141,9 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 j++;
 }
-if (mem->is_flushed == FALSE) {
+if (!mem->is_flushed) {
 global_cache_flush();
-mem->is_flushed = TRUE;
+mem->is_flushed = true;
 }
 for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
@@ -226,7 +226,7 @@ static const struct agp_bridge_driver parisc_agp_driver = {
 .agp_alloc_page = agp_generic_alloc_page,
 .agp_destroy_page = agp_generic_destroy_page,
 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-.cant_use_aperture = 1,
+.cant_use_aperture = true,
 };
 static int __init
...
@@ -182,9 +182,9 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
 j++;
 }
-if (mem->is_flushed == FALSE) {
+if (!mem->is_flushed) {
 bridge->driver->cache_flush();
-mem->is_flushed = TRUE;
+mem->is_flushed = true;
 }
 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
@@ -264,8 +264,8 @@ const struct agp_bridge_driver sgi_tioca_driver = {
 .agp_alloc_page = sgi_tioca_alloc_page,
 .agp_destroy_page = agp_generic_destroy_page,
 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-.cant_use_aperture = 1,
-.needs_scratch_page = 0,
+.cant_use_aperture = true,
+.needs_scratch_page = false,
 .num_aperture_sizes = 1,
 };
...
@@ -339,9 +339,9 @@ static int serverworks_insert_memory(struct agp_memory *mem,
 j++;
 }
-if (mem->is_flushed == FALSE) {
+if (!mem->is_flushed) {
 global_cache_flush();
-mem->is_flushed = TRUE;
+mem->is_flushed = true;
 }
 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
@@ -412,7 +412,7 @@ static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 bridge->capndx + PCI_AGP_COMMAND,
 command);
-agp_device_command(command, 0);
+agp_device_command(command, false);
 }
 static const struct agp_bridge_driver sworks_driver = {
...
@@ -281,10 +281,10 @@ static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 if (uninorth_rev >= 0x30) {
 /* This is an AGP V3 */
-agp_device_command(command, (status & AGPSTAT_MODE_3_0));
+agp_device_command(command, (status & AGPSTAT_MODE_3_0) != 0);
 } else {
 /* AGP V2 */
-agp_device_command(command, 0);
+agp_device_command(command, false);
 }
 uninorth_tlbflush(NULL);
@@ -511,7 +511,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
 .agp_alloc_page = agp_generic_alloc_page,
 .agp_destroy_page = agp_generic_destroy_page,
 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-.cant_use_aperture = 1,
+.cant_use_aperture = true,
 };
 const struct agp_bridge_driver u3_agp_driver = {
@@ -536,8 +536,8 @@ const struct agp_bridge_driver u3_agp_driver = {
 .agp_alloc_page = agp_generic_alloc_page,
 .agp_destroy_page = agp_generic_destroy_page,
 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
-.cant_use_aperture = 1,
-.needs_scratch_page = 1,
+.cant_use_aperture = true,
+.needs_scratch_page = true,
 };
 static struct agp_device_ids uninorth_agp_device_ids[] __devinitdata = {
...
@@ -389,11 +389,20 @@ static struct agp_device_ids via_agp_device_ids[] __devinitdata =
 .device_id = PCI_DEVICE_ID_VIA_VT3324,
 .chipset_name = "CX700",
 },
-/* VT3336 */
+/* VT3336 - this is a chipset for AMD Athlon/K8 CPU. Due to K8's unique
+ * architecture, the AGP resource and behavior are different from
+ * the traditional AGP which resides only in chipset. AGP is used
+ * by 3D driver which wasn't available for the VT3336 and VT3364
+ * generation until now. Unfortunately, by testing, VT3364 works
+ * but VT3336 doesn't. - explaination from via, just leave this as
+ * as a placeholder to avoid future patches adding it back in.
+ */
+#if 0
 {
 .device_id = PCI_DEVICE_ID_VIA_VT3336,
 .chipset_name = "VT3336",
 },
+#endif
 /* P4M890 */
 {
 .device_id = PCI_DEVICE_ID_VIA_P4M890,
@@ -546,8 +555,8 @@ static const struct pci_device_id agp_via_pci_table[] = {
 ID(PCI_DEVICE_ID_VIA_3296_0),
 ID(PCI_DEVICE_ID_VIA_P4M800CE),
 ID(PCI_DEVICE_ID_VIA_VT3324),
-ID(PCI_DEVICE_ID_VIA_VT3336),
 ID(PCI_DEVICE_ID_VIA_P4M890),
+ID(PCI_DEVICE_ID_VIA_VT3364),
 { }
 };
...
@@ -30,14 +30,6 @@
 #ifndef _AGP_BACKEND_H
 #define _AGP_BACKEND_H 1
-#ifndef TRUE
-#define TRUE 1
-#endif
-#ifndef FALSE
-#define FALSE 0
-#endif
 enum chipset_type {
 NOT_SUPPORTED,
 SUPPORTED,
@@ -57,7 +49,7 @@ struct agp_kern_info {
 size_t aper_size;
 int max_memory; /* In pages */
 int current_memory;
-int cant_use_aperture;
+bool cant_use_aperture;
 unsigned long page_mask;
 struct vm_operations_struct *vm_ops;
 };
@@ -83,9 +75,9 @@ struct agp_memory {
 off_t pg_start;
 u32 type;
 u32 physical;
-u8 is_bound;
-u8 is_flushed;
-u8 vmalloc_flag;
+bool is_bound;
+bool is_flushed;
+bool vmalloc_flag;
 };
 #define AGP_NORMAL_MEMORY 0
...
@@ -206,8 +206,8 @@ struct agp_front_data {
 struct agp_controller *current_controller;
 struct agp_controller *controllers;
 struct agp_file_private *file_priv_list;
-u8 used_by_controller;
-u8 backend_acquired;
+bool used_by_controller;
+bool backend_acquired;
 };
 #endif /* __KERNEL__ */
...