Merge branch 'agp-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/agp-2.6
* 'agp-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/agp-2.6:
  agp/intel: cleanup some serious whitespace badness
  [AGP] intel_agp: Add support for Intel 4 series chipsets
  [AGP] intel_agp: extra stolen mem size available for IGD_GM chipset
  agp: more boolean conversions.
  drivers/char/agp - use bool
  agp: two-stage page destruction issue
  agp/via: fixup pci ids
commit 3506ba7b08
@@ -99,8 +99,8 @@ struct agp_bridge_driver {
 	const void *aperture_sizes;
 	int num_aperture_sizes;
 	enum aper_size_type size_type;
-	int cant_use_aperture;
-	int needs_scratch_page;
+	bool cant_use_aperture;
+	bool needs_scratch_page;
 	const struct gatt_mask *masks;
 	int (*fetch_size)(void);
 	int (*configure)(void);

@@ -278,7 +278,7 @@ void agp_generic_destroy_page(void *addr, int flags);
 void agp_free_key(int key);
 int agp_num_entries(void);
 u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
-void agp_device_command(u32 command, int agp_v3);
+void agp_device_command(u32 command, bool agp_v3);
 int agp_3_5_enable(struct agp_bridge_data *bridge);
 void global_cache_flush(void);
 void get_agp_version(struct agp_bridge_data *bridge);
@@ -80,7 +80,7 @@ static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 	agp->mode.bits.enable = 1;
 	agp->ops->configure(agp);
 
-	agp_device_command(agp->mode.lw, 0);
+	agp_device_command(agp->mode.lw, false);
 }
 
 static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,

@@ -126,7 +126,7 @@ struct agp_bridge_driver alpha_core_agp_driver = {
 	.aperture_sizes = alpha_core_agp_sizes,
 	.num_aperture_sizes = 1,
 	.size_type = FIXED_APER_SIZE,
-	.cant_use_aperture = 1,
+	.cant_use_aperture = true,
 	.masks = NULL,
 
 	.fetch_size = alpha_core_agp_fetch_size,
@@ -314,9 +314,9 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		global_cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
@@ -90,9 +90,9 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		global_cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
@@ -287,10 +287,10 @@ static int ati_insert_memory(struct agp_memory * mem,
 		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		/*CACHE_FLUSH(); */
 		global_cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
@@ -188,10 +188,10 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 
 err_out:
 	if (bridge->driver->needs_scratch_page) {
-		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
-						 AGP_PAGE_DESTROY_UNMAP);
-		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
-						 AGP_PAGE_DESTROY_FREE);
+		void *va = gart_to_virt(bridge->scratch_page_real);
+
+		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
+		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
 	}
 	if (got_gatt)
 		bridge->driver->free_gatt_table(bridge);

@@ -215,10 +215,10 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
 
 	if (bridge->driver->agp_destroy_page &&
 	    bridge->driver->needs_scratch_page) {
-		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
-						 AGP_PAGE_DESTROY_UNMAP);
-		bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
-						 AGP_PAGE_DESTROY_FREE);
+		void *va = gart_to_virt(bridge->scratch_page_real);
+
+		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
+		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
 	}
 }
 
@@ -214,7 +214,7 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		ret_val = -EINVAL;
 		goto ioctl_out;
 	}
-	if ((agp_fe.backend_acquired != TRUE) &&
+	if ((agp_fe.backend_acquired != true) &&
 	    (cmd != AGPIOC_ACQUIRE32)) {
 		ret_val = -EBUSY;
 		goto ioctl_out;
@@ -249,9 +249,9 @@ static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int t
 	if (type != 0 || mem->type != 0)
 		return -EINVAL;
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		global_cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 
 	last_page = NULL;

@@ -329,7 +329,7 @@ static const struct agp_bridge_driver efficeon_driver = {
 	.free_gatt_table = efficeon_free_gatt_table,
 	.insert_memory = efficeon_insert_memory,
 	.remove_memory = efficeon_remove_memory,
-	.cant_use_aperture = 0,	// 1 might be faster?
+	.cant_use_aperture = false,	// true might be faster?
 
 	// Generic
 	.alloc_by_type = agp_generic_alloc_by_type,
@@ -395,7 +395,7 @@ static int agp_remove_controller(struct agp_controller *controller)
 
 	if (agp_fe.current_controller == controller) {
 		agp_fe.current_controller = NULL;
-		agp_fe.backend_acquired = FALSE;
+		agp_fe.backend_acquired = false;
 		agp_backend_release(agp_bridge);
 	}
 	kfree(controller);

@@ -443,7 +443,7 @@ static void agp_controller_release_current(struct agp_controller *controller,
 	}
 
 	agp_fe.current_controller = NULL;
-	agp_fe.used_by_controller = FALSE;
+	agp_fe.used_by_controller = false;
 	agp_backend_release(agp_bridge);
 }
 

@@ -573,7 +573,7 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma)
 
 	mutex_lock(&(agp_fe.agp_mutex));
 
-	if (agp_fe.backend_acquired != TRUE)
+	if (agp_fe.backend_acquired != true)
 		goto out_eperm;
 
 	if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))

@@ -768,7 +768,7 @@ int agpioc_acquire_wrap(struct agp_file_private *priv)
 
 	atomic_inc(&agp_bridge->agp_in_use);
 
-	agp_fe.backend_acquired = TRUE;
+	agp_fe.backend_acquired = true;
 
 	controller = agp_find_controller_by_pid(priv->my_pid);
 

@@ -778,7 +778,7 @@ int agpioc_acquire_wrap(struct agp_file_private *priv)
 		controller = agp_create_controller(priv->my_pid);
 
 		if (controller == NULL) {
-			agp_fe.backend_acquired = FALSE;
+			agp_fe.backend_acquired = false;
 			agp_backend_release(agp_bridge);
 			return -ENOMEM;
 		}

@@ -981,7 +981,7 @@ static long agp_ioctl(struct file *file,
 		ret_val = -EINVAL;
 		goto ioctl_out;
 	}
-	if ((agp_fe.backend_acquired != TRUE) &&
+	if ((agp_fe.backend_acquired != true) &&
 	    (cmd != AGPIOC_ACQUIRE)) {
 		ret_val = -EBUSY;
 		goto ioctl_out;
@@ -96,13 +96,13 @@ EXPORT_SYMBOL(agp_flush_chipset);
 void agp_alloc_page_array(size_t size, struct agp_memory *mem)
 {
 	mem->memory = NULL;
-	mem->vmalloc_flag = 0;
+	mem->vmalloc_flag = false;
 
 	if (size <= 2*PAGE_SIZE)
 		mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
 	if (mem->memory == NULL) {
 		mem->memory = vmalloc(size);
-		mem->vmalloc_flag = 1;
+		mem->vmalloc_flag = true;
 	}
 }
 EXPORT_SYMBOL(agp_alloc_page_array);

@@ -188,7 +188,7 @@ void agp_free_memory(struct agp_memory *curr)
 	if (curr == NULL)
 		return;
 
-	if (curr->is_bound == TRUE)
+	if (curr->is_bound)
 		agp_unbind_memory(curr);
 
 	if (curr->type >= AGP_USER_TYPES) {

@@ -202,10 +202,13 @@ void agp_free_memory(struct agp_memory *curr)
 	}
 	if (curr->page_count != 0) {
 		for (i = 0; i < curr->page_count; i++) {
-			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP);
+			curr->memory[i] = (unsigned long)gart_to_virt(curr->memory[i]);
+			curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
+							       AGP_PAGE_DESTROY_UNMAP);
 		}
 		for (i = 0; i < curr->page_count; i++) {
-			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE);
+			curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
+							       AGP_PAGE_DESTROY_FREE);
 		}
 	}
 	agp_free_key(curr->key);

@@ -411,20 +414,20 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
 	if (curr == NULL)
 		return -EINVAL;
 
-	if (curr->is_bound == TRUE) {
+	if (curr->is_bound) {
 		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
 		return -EINVAL;
 	}
-	if (curr->is_flushed == FALSE) {
+	if (!curr->is_flushed) {
 		curr->bridge->driver->cache_flush();
-		curr->is_flushed = TRUE;
+		curr->is_flushed = true;
 	}
 	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 
 	if (ret_val != 0)
 		return ret_val;
 
-	curr->is_bound = TRUE;
+	curr->is_bound = true;
 	curr->pg_start = pg_start;
 	return 0;
 }

@@ -446,7 +449,7 @@ int agp_unbind_memory(struct agp_memory *curr)
 	if (curr == NULL)
 		return -EINVAL;
 
-	if (curr->is_bound != TRUE) {
+	if (!curr->is_bound) {
 		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
 		return -EINVAL;
 	}

@@ -456,7 +459,7 @@ int agp_unbind_memory(struct agp_memory *curr)
 	if (ret_val != 0)
 		return ret_val;
 
-	curr->is_bound = FALSE;
+	curr->is_bound = false;
 	curr->pg_start = 0;
 	return 0;
 }

@@ -754,7 +757,7 @@ u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode
 EXPORT_SYMBOL(agp_collect_device_status);
 
 
-void agp_device_command(u32 bridge_agpstat, int agp_v3)
+void agp_device_command(u32 bridge_agpstat, bool agp_v3)
 {
 	struct pci_dev *device = NULL;
 	int mode;

@@ -818,7 +821,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 		/* If we have 3.5, we can do the isoch stuff. */
 		if (bridge->minor_version >= 5)
 			agp_3_5_enable(bridge);
-		agp_device_command(bridge_agpstat, TRUE);
+		agp_device_command(bridge_agpstat, true);
 		return;
 	} else {
 		/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/

@@ -835,7 +838,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 	}
 
 	/* AGP v<3 */
-	agp_device_command(bridge_agpstat, FALSE);
+	agp_device_command(bridge_agpstat, false);
 }
 EXPORT_SYMBOL(agp_generic_enable);
 

@@ -1083,9 +1086,9 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		bridge->driver->cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
@@ -353,9 +353,9 @@ hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
 		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		global_cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 
 	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {

@@ -437,7 +437,7 @@ const struct agp_bridge_driver hp_zx1_driver = {
 	.agp_alloc_page = agp_generic_alloc_page,
 	.agp_destroy_page = agp_generic_destroy_page,
 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
-	.cant_use_aperture = 1,
+	.cant_use_aperture = true,
 };
 
 static int __init
@@ -580,7 +580,7 @@ const struct agp_bridge_driver intel_i460_driver = {
 	.alloc_by_type = agp_generic_alloc_by_type,
 	.free_by_type = agp_generic_free_by_type,
 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
-	.cant_use_aperture = 1,
+	.cant_use_aperture = true,
 };
 
 static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
@@ -34,6 +34,12 @@
 #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
 #define PCI_DEVICE_ID_INTEL_IGD_HB 0x2A40
 #define PCI_DEVICE_ID_INTEL_IGD_IG 0x2A42
+#define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00
+#define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02
+#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
+#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
+#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
+#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
 
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \

@@ -55,6 +61,10 @@
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
 
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB)
+
 extern int agp_memory_reserved;
 
 

@@ -82,6 +92,11 @@ extern int agp_memory_reserved;
 #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
 #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
 #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+
 #define I915_IFPADDR 0x60
 
 /* Intel 965G registers */
@@ -325,7 +340,7 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 out:
 	ret = 0;
 out_err:
-	mem->is_flushed = 1;
+	mem->is_flushed = true;
 	return ret;
 }
 

@@ -418,9 +433,11 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
 		if (curr->page_count == 4)
 			i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
 		else {
-			agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
+			void *va = gart_to_virt(curr->memory[0]);
+
+			agp_bridge->driver->agp_destroy_page(va,
 							     AGP_PAGE_DESTROY_UNMAP);
-			agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
+			agp_bridge->driver->agp_destroy_page(va,
 							     AGP_PAGE_DESTROY_FREE);
 		}
 		agp_free_page_array(curr);

@@ -504,6 +521,10 @@ static void intel_i830_init_gtt_entries(void)
 			size = 512;
 		}
 		size += 4;
+	} else if (IS_G4X) {
+		/* On 4 series hardware, GTT stolen is separate from graphics
+		 * stolen, ignore it in stolen gtt entries counting */
+		size = 0;
 	} else {
 		/* On previous hardware, the GTT size was just what was
 		 * required to map the aperture.
@@ -552,30 +573,54 @@ static void intel_i830_init_gtt_entries(void)
 		break;
 	case I915_GMCH_GMS_STOLEN_48M:
 		/* Check it's really I915G */
-		if (IS_I915 || IS_I965 || IS_G33)
+		if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
 			gtt_entries = MB(48) - KB(size);
 		else
 			gtt_entries = 0;
 		break;
 	case I915_GMCH_GMS_STOLEN_64M:
 		/* Check it's really I915G */
-		if (IS_I915 || IS_I965 || IS_G33)
+		if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
 			gtt_entries = MB(64) - KB(size);
 		else
 			gtt_entries = 0;
 		break;
 	case G33_GMCH_GMS_STOLEN_128M:
-		if (IS_G33)
+		if (IS_G33 || IS_I965 || IS_G4X)
 			gtt_entries = MB(128) - KB(size);
 		else
 			gtt_entries = 0;
 		break;
 	case G33_GMCH_GMS_STOLEN_256M:
-		if (IS_G33)
+		if (IS_G33 || IS_I965 || IS_G4X)
 			gtt_entries = MB(256) - KB(size);
 		else
 			gtt_entries = 0;
 		break;
+	case INTEL_GMCH_GMS_STOLEN_96M:
+		if (IS_I965 || IS_G4X)
+			gtt_entries = MB(96) - KB(size);
+		else
+			gtt_entries = 0;
+		break;
+	case INTEL_GMCH_GMS_STOLEN_160M:
+		if (IS_I965 || IS_G4X)
+			gtt_entries = MB(160) - KB(size);
+		else
+			gtt_entries = 0;
+		break;
+	case INTEL_GMCH_GMS_STOLEN_224M:
+		if (IS_I965 || IS_G4X)
+			gtt_entries = MB(224) - KB(size);
+		else
+			gtt_entries = 0;
+		break;
+	case INTEL_GMCH_GMS_STOLEN_352M:
+		if (IS_I965 || IS_G4X)
+			gtt_entries = MB(352) - KB(size);
+		else
+			gtt_entries = 0;
+		break;
 	default:
 		gtt_entries = 0;
 		break;
@@ -793,7 +838,7 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
 out:
 	ret = 0;
 out_err:
-	mem->is_flushed = 1;
+	mem->is_flushed = true;
 	return ret;
 }
 

@@ -1020,7 +1065,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
 out:
 	ret = 0;
 out_err:
-	mem->is_flushed = 1;
+	mem->is_flushed = true;
 	return ret;
 }
 
@@ -1134,6 +1179,20 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
 	return addr | bridge->driver->masks[type].mask;
 }
 
+static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
+{
+	switch (agp_bridge->dev->device) {
+	case PCI_DEVICE_ID_INTEL_IGD_HB:
+	case PCI_DEVICE_ID_INTEL_IGD_E_HB:
+	case PCI_DEVICE_ID_INTEL_Q45_HB:
+	case PCI_DEVICE_ID_INTEL_G45_HB:
+		*gtt_offset = *gtt_size = MB(2);
+		break;
+	default:
+		*gtt_offset = *gtt_size = KB(512);
+	}
+}
+
 /* The intel i965 automatically initializes the agp aperture during POST.
  * Use the memory already set aside for in the GTT.
  */

@@ -1154,10 +1213,7 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
 
 	temp &= 0xfff00000;
 
-	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB)
-		gtt_offset = gtt_size = MB(2);
-	else
-		gtt_offset = gtt_size = KB(512);
+	intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
 
 	intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
 
@@ -1656,7 +1712,7 @@ static const struct agp_bridge_driver intel_810_driver = {
 	.aperture_sizes = intel_i810_sizes,
 	.size_type = FIXED_APER_SIZE,
 	.num_aperture_sizes = 2,
-	.needs_scratch_page = TRUE,
+	.needs_scratch_page = true,
 	.configure = intel_i810_configure,
 	.fetch_size = intel_i810_fetch_size,
 	.cleanup = intel_i810_cleanup,

@@ -1705,7 +1761,7 @@ static const struct agp_bridge_driver intel_830_driver = {
 	.aperture_sizes = intel_i830_sizes,
 	.size_type = FIXED_APER_SIZE,
 	.num_aperture_sizes = 4,
-	.needs_scratch_page = TRUE,
+	.needs_scratch_page = true,
 	.configure = intel_i830_configure,
 	.fetch_size = intel_i830_fetch_size,
 	.cleanup = intel_i830_cleanup,

@@ -1876,7 +1932,7 @@ static const struct agp_bridge_driver intel_915_driver = {
 	.aperture_sizes = intel_i830_sizes,
 	.size_type = FIXED_APER_SIZE,
 	.num_aperture_sizes = 4,
-	.needs_scratch_page = TRUE,
+	.needs_scratch_page = true,
 	.configure = intel_i915_configure,
 	.fetch_size = intel_i9xx_fetch_size,
 	.cleanup = intel_i915_cleanup,
@@ -1902,9 +1958,7 @@ static const struct agp_bridge_driver intel_i965_driver = {
 	.aperture_sizes = intel_i830_sizes,
 	.size_type = FIXED_APER_SIZE,
 	.num_aperture_sizes = 4,
-	.needs_scratch_page = TRUE,
-	.configure = intel_i915_configure,
-	.fetch_size = intel_i9xx_fetch_size,
+	.needs_scratch_page = true,
 	.cleanup = intel_i915_cleanup,
 	.tlb_flush = intel_i810_tlbflush,
 	.mask_memory = intel_i965_mask_memory,
@@ -1952,7 +2006,7 @@ static const struct agp_bridge_driver intel_g33_driver = {
 	.aperture_sizes = intel_i830_sizes,
 	.size_type = FIXED_APER_SIZE,
 	.num_aperture_sizes = 4,
-	.needs_scratch_page = TRUE,
+	.needs_scratch_page = true,
 	.configure = intel_i915_configure,
 	.fetch_size = intel_i9xx_fetch_size,
 	.cleanup = intel_i915_cleanup,

@@ -2063,6 +2117,12 @@ static const struct intel_driver_description {
 		NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_IGD_HB, PCI_DEVICE_ID_INTEL_IGD_IG, 0,
 		"Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
+		"Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
+		"Q45/Q43", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
+		"G45/G43", NULL, &intel_i965_driver },
 	{ 0, 0, 0, NULL, NULL, NULL }
 };
 

@@ -2254,6 +2314,9 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_Q35_HB),
 	ID(PCI_DEVICE_ID_INTEL_Q33_HB),
 	ID(PCI_DEVICE_ID_INTEL_IGD_HB),
+	ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
+	ID(PCI_DEVICE_ID_INTEL_Q45_HB),
+	ID(PCI_DEVICE_ID_INTEL_G45_HB),
 	{ }
 };
 
@@ -214,9 +214,9 @@ static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type
 		return -EBUSY;
 	}
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		global_cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -141,9 +141,9 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		global_cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 
 	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {

@@ -226,7 +226,7 @@ static const struct agp_bridge_driver parisc_agp_driver = {
 	.agp_alloc_page = agp_generic_alloc_page,
 	.agp_destroy_page = agp_generic_destroy_page,
 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
-	.cant_use_aperture = 1,
+	.cant_use_aperture = true,
 };
 
 static int __init
@@ -182,9 +182,9 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
 		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		bridge->driver->cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {

@@ -264,8 +264,8 @@ const struct agp_bridge_driver sgi_tioca_driver = {
 	.agp_alloc_page = sgi_tioca_alloc_page,
 	.agp_destroy_page = agp_generic_destroy_page,
 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
-	.cant_use_aperture = 1,
-	.needs_scratch_page = 0,
+	.cant_use_aperture = true,
+	.needs_scratch_page = false,
 	.num_aperture_sizes = 1,
 };
 
@@ -339,9 +339,9 @@ static int serverworks_insert_memory(struct agp_memory *mem,
 		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		global_cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {

@@ -412,7 +412,7 @@ static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 			      bridge->capndx + PCI_AGP_COMMAND,
 			      command);
 
-	agp_device_command(command, 0);
+	agp_device_command(command, false);
 }
 
 static const struct agp_bridge_driver sworks_driver = {
@@ -281,10 +281,10 @@ static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 
 	if (uninorth_rev >= 0x30) {
 		/* This is an AGP V3 */
-		agp_device_command(command, (status & AGPSTAT_MODE_3_0));
+		agp_device_command(command, (status & AGPSTAT_MODE_3_0) != 0);
 	} else {
 		/* AGP V2 */
-		agp_device_command(command, 0);
+		agp_device_command(command, false);
 	}
 
 	uninorth_tlbflush(NULL);

@@ -511,7 +511,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
 	.agp_alloc_page = agp_generic_alloc_page,
 	.agp_destroy_page = agp_generic_destroy_page,
 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
-	.cant_use_aperture = 1,
+	.cant_use_aperture = true,
 };
 
 const struct agp_bridge_driver u3_agp_driver = {

@@ -536,8 +536,8 @@ const struct agp_bridge_driver u3_agp_driver = {
 	.agp_alloc_page = agp_generic_alloc_page,
 	.agp_destroy_page = agp_generic_destroy_page,
 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
-	.cant_use_aperture = 1,
-	.needs_scratch_page = 1,
+	.cant_use_aperture = true,
+	.needs_scratch_page = true,
 };
 
 static struct agp_device_ids uninorth_agp_device_ids[] __devinitdata = {
@@ -389,11 +389,20 @@ static struct agp_device_ids via_agp_device_ids[] __devinitdata =
 		.device_id = PCI_DEVICE_ID_VIA_VT3324,
 		.chipset_name = "CX700",
 	},
-	/* VT3336 */
+	/* VT3336 - this is a chipset for AMD Athlon/K8 CPU. Due to K8's unique
+	 * architecture, the AGP resource and behavior are different from
+	 * the traditional AGP which resides only in chipset. AGP is used
+	 * by 3D driver which wasn't available for the VT3336 and VT3364
+	 * generation until now. Unfortunately, by testing, VT3364 works
+	 * but VT3336 doesn't. - explaination from via, just leave this as
+	 * as a placeholder to avoid future patches adding it back in.
+	 */
+#if 0
 	{
 		.device_id = PCI_DEVICE_ID_VIA_VT3336,
 		.chipset_name = "VT3336",
 	},
+#endif
 	/* P4M890 */
 	{
 		.device_id = PCI_DEVICE_ID_VIA_P4M890,

@@ -546,8 +555,8 @@ static const struct pci_device_id agp_via_pci_table[] = {
 	ID(PCI_DEVICE_ID_VIA_3296_0),
 	ID(PCI_DEVICE_ID_VIA_P4M800CE),
 	ID(PCI_DEVICE_ID_VIA_VT3324),
-	ID(PCI_DEVICE_ID_VIA_VT3336),
 	ID(PCI_DEVICE_ID_VIA_P4M890),
+	ID(PCI_DEVICE_ID_VIA_VT3364),
 	{ }
 };
 
@@ -30,14 +30,6 @@
 #ifndef _AGP_BACKEND_H
 #define _AGP_BACKEND_H 1
 
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
 enum chipset_type {
 	NOT_SUPPORTED,
 	SUPPORTED,

@@ -57,7 +49,7 @@ struct agp_kern_info {
 	size_t aper_size;
 	int max_memory;		/* In pages */
 	int current_memory;
-	int cant_use_aperture;
+	bool cant_use_aperture;
 	unsigned long page_mask;
 	struct vm_operations_struct *vm_ops;
 };

@@ -83,9 +75,9 @@ struct agp_memory {
 	off_t pg_start;
 	u32 type;
 	u32 physical;
-	u8 is_bound;
-	u8 is_flushed;
-	u8 vmalloc_flag;
+	bool is_bound;
+	bool is_flushed;
+	bool vmalloc_flag;
 };
 
 #define AGP_NORMAL_MEMORY 0
@@ -206,8 +206,8 @@ struct agp_front_data {
 	struct agp_controller *current_controller;
 	struct agp_controller *controllers;
 	struct agp_file_private *file_priv_list;
-	u8 used_by_controller;
-	u8 backend_acquired;
+	bool used_by_controller;
+	bool backend_acquired;
 };
 
 #endif /* __KERNEL__ */