diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/Kconfig agpgart/drivers/char/agp/Kconfig --- bk-linus/drivers/char/agp/Kconfig 2002-12-13 18:51:26.000000000 -0100 +++ agpgart/drivers/char/agp/Kconfig 2002-12-13 18:16:25.000000000 -0100 @@ -29,8 +29,11 @@ config AGP_GART bool "/dev/agpgart (AGP Support)" depends on GART_IOMMU +config AGP3 + bool "AGP 3.0 compliance (EXPERIMENTAL)" + config AGP_INTEL - bool "Intel 440LX/BX/GX and I815/I820/I830M/I830MP/I840/I845/I850/I860 support" + tristate "Intel 440LX/BX/GX and I815/I820/I830M/I830MP/I840/I845/I850/I860 support" depends on AGP help This option gives you AGP support for the GLX component of the @@ -40,7 +43,7 @@ config AGP_INTEL use GLX or DRI. If unsure, say N. #config AGP_I810 -# bool "Intel I810/I815/I830M (on-board) support" +# tristate "Intel I810/I815/I830M (on-board) support" # depends on AGP # help # This option gives you AGP support for the Xserver on the Intel 810 @@ -48,7 +51,7 @@ config AGP_INTEL # is required to do any useful video modes with these boards. config AGP_VIA - bool "VIA chipset support" + tristate "VIA chipset support" depends on AGP help This option gives you AGP support for the GLX component of the @@ -58,7 +61,7 @@ config AGP_VIA use GLX or DRI. If unsure, say N. config AGP_AMD - bool "AMD Irongate, 761, and 762 support" + tristate "AMD Irongate, 761, and 762 support" depends on AGP help This option gives you AGP support for the GLX component of the @@ -68,7 +71,7 @@ config AGP_AMD use GLX or DRI. If unsure, say N. config AGP_SIS - bool "Generic SiS support" + tristate "Generic SiS support" depends on AGP help This option gives you AGP support for the GLX component of the "soon @@ -81,7 +84,7 @@ config AGP_SIS use GLX or DRI. If unsure, say N. config AGP_ALI - bool "ALI chipset support" + tristate "ALI chipset support" depends on AGP ---help--- This option gives you AGP support for the GLX component of the @@ -99,14 +102,14 @@ config AGP_ALI use GLX or DRI. 
If unsure, say N. config AGP_SWORKS - bool "Serverworks LE/HE support" + tristate "Serverworks LE/HE support" depends on AGP help Say Y here to support the Serverworks AGP card. See for product descriptions and images. config AGP_AMD_8151 - bool "AMD 8151 support" + tristate "AMD 8151 support" depends on AGP default GART_IOMMU help @@ -114,16 +117,28 @@ config AGP_AMD_8151 GART on the AMD Athlon64/Opteron ("Hammer") CPUs. config AGP_I460 - bool "Intel 460GX support" + tristate "Intel 460GX support" depends on AGP && IA64 help This option gives you AGP GART support for the Intel 460GX chipset for IA64 processors. config AGP_HP_ZX1 - bool "HP ZX1 AGP support" + tristate "HP ZX1 AGP support" depends on AGP && IA64 help This option gives you AGP GART support for the HP ZX1 chipset for IA64 processors. +# Put AGP 3.0 entries below here. + +config AGP_I7505 + tristate "Intel 7205/7505 support (AGP 3.0)" + depends on AGP3 + help + This option gives you AGP support for the GLX component of the + XFree86 4.x on Intel I7505 chipsets. + + You should say Y here if you use XFree86 3.3.6 or 4.x and want to + use GLX or DRI. 
If unsure, say N + diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/Makefile agpgart/drivers/char/agp/Makefile --- bk-linus/drivers/char/agp/Makefile 2002-12-13 18:51:26.000000000 -0100 +++ agpgart/drivers/char/agp/Makefile 2002-12-13 18:16:25.000000000 -0100 @@ -8,6 +8,7 @@ export-objs := backend.o agpgart-y := backend.o frontend.o generic.o agpgart-objs := $(agpgart-y) obj-$(CONFIG_AGP) += agpgart.o +obj-$(CONFIG_AGP3) += generic-3.0.o obj-$(CONFIG_AGP_INTEL) += intel-agp.o obj-$(CONFIG_AGP_VIA) += via-agp.o @@ -19,3 +20,5 @@ obj-$(CONFIG_AGP_I460) += i460-agp.o obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o obj-$(CONFIG_AGP_AMD_8151) += amd-k8-agp.o +obj-$(CONFIG_AGP_I7505) += i7x05-agp.o + diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/agp.h agpgart/drivers/char/agp/agp.h --- bk-linus/drivers/char/agp/agp.h 2002-12-13 18:51:26.000000000 -0100 +++ agpgart/drivers/char/agp/agp.h 2002-12-13 17:36:51.000000000 -0100 @@ -46,28 +46,6 @@ int agp_generic_suspend(void); void agp_generic_resume(void); void agp_free_key(int key); -/* chipset specific init routines. 
*/ -/* -int __init ali_generic_setup (struct pci_dev *pdev); -int __init amd_irongate_setup (struct pci_dev *pdev); -int __init amd_8151_setup (struct pci_dev *pdev); -int __init hp_zx1_setup (struct pci_dev *pdev); -int __init intel_i460_setup (struct pci_dev *pdev); -int __init intel_generic_setup (struct pci_dev *pdev); -int __init intel_i810_setup(struct pci_dev *i810_dev); -int __init intel_815_setup(struct pci_dev *pdev); -int __init intel_i830_setup(struct pci_dev *i830_dev); -int __init intel_820_setup (struct pci_dev *pdev); -int __init intel_830mp_setup (struct pci_dev *pdev); -int __init intel_840_setup (struct pci_dev *pdev); -int __init intel_845_setup (struct pci_dev *pdev); -int __init intel_850_setup (struct pci_dev *pdev); -int __init intel_860_setup (struct pci_dev *pdev); -int __init serverworks_setup (struct pci_dev *pdev); -int __init sis_generic_setup (struct pci_dev *pdev); -int __init via_generic_setup (struct pci_dev *pdev); -*/ - #define PFX "agpgart: " int agp_register_driver (struct pci_dev *dev); diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/amd-k7-agp.c agpgart/drivers/char/agp/amd-k7-agp.c --- bk-linus/drivers/char/agp/amd-k7-agp.c 2002-12-13 18:51:26.000000000 -0100 +++ agpgart/drivers/char/agp/amd-k7-agp.c 2002-12-13 18:16:25.000000000 -0100 @@ -138,8 +138,8 @@ static int amd_create_gatt_table(void) return retval; } - agp_bridge.gatt_table_real = (u32 *)page_dir.real; - agp_bridge.gatt_table = (u32 *)page_dir.remapped; + agp_bridge.gatt_table_real = (unsigned long *)page_dir.real; + agp_bridge.gatt_table = (unsigned long *)page_dir.remapped; agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real); /* Get the address for the gart region. 
@@ -165,8 +165,8 @@ static int amd_free_gatt_table(void) { struct amd_page_map page_dir; - page_dir.real = (u32 *)agp_bridge.gatt_table_real; - page_dir.remapped = (u32 *)agp_bridge.gatt_table; + page_dir.real = (unsigned long *)agp_bridge.gatt_table_real; + page_dir.remapped = (unsigned long *)agp_bridge.gatt_table; amd_free_gatt_pages(); amd_free_page_map(&page_dir); diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/amd-k8-agp.c agpgart/drivers/char/agp/amd-k8-agp.c --- bk-linus/drivers/char/agp/amd-k8-agp.c 2002-12-13 18:51:26.000000000 -0100 +++ agpgart/drivers/char/agp/amd-k8-agp.c 2002-12-13 18:16:25.000000000 -0100 @@ -151,7 +151,7 @@ static int amd_x86_64_fetch_size(void) } -static void inline flush_x86_64_tlb(struct pci_dev *dev) +static void flush_x86_64_tlb(struct pci_dev *dev) { u32 tmp; diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/backend.c agpgart/drivers/char/agp/backend.c --- bk-linus/drivers/char/agp/backend.c 2002-12-10 04:56:06.000000000 -0100 +++ agpgart/drivers/char/agp/backend.c 2002-12-13 18:16:25.000000000 -0100 @@ -36,8 +36,11 @@ #include #include "agp.h" -#define AGPGART_VERSION_MAJOR 1 -#define AGPGART_VERSION_MINOR 0 +/* Due to XFree86 brain-damage, we can't go to 1.0 until they + * fix some real stupidity. It's only by chance we can bump + * past 0.99 at all due to some boolean logic error. 
*/ +#define AGPGART_VERSION_MAJOR 0 +#define AGPGART_VERSION_MINOR 100 struct agp_bridge_data agp_bridge = { .type = NOT_SUPPORTED }; @@ -258,7 +261,7 @@ int agp_register_driver (struct pci_dev return 0; } -int __exit agp_unregister_driver(void) +int agp_unregister_driver(void) { agp_bridge.type = NOT_SUPPORTED; pm_unregister_all(agp_power); @@ -269,8 +272,23 @@ int __exit agp_unregister_driver(void) return 0; } +int __exit agp_exit(void) +{ + if (agp_count==0) + return -EBUSY; + + return 0; +} + int __init agp_init(void) { + static int already_initialised=0; + + if (already_initialised!=0) + return 0; + + already_initialised = 1; + memset(&agp_bridge, 0, sizeof(struct agp_bridge_data)); agp_bridge.type = NOT_SUPPORTED; @@ -281,11 +299,13 @@ int __init agp_init(void) #ifndef CONFIG_GART_IOMMU module_init(agp_init); +module_exit(agp_exit); #endif EXPORT_SYMBOL(agp_backend_acquire); EXPORT_SYMBOL(agp_backend_release); EXPORT_SYMBOL_GPL(agp_register_driver); +EXPORT_SYMBOL_GPL(agp_unregister_driver); MODULE_AUTHOR("Dave Jones "); MODULE_LICENSE("GPL and additional rights"); diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/frontend.c agpgart/drivers/char/agp/frontend.c --- bk-linus/drivers/char/agp/frontend.c 2002-12-13 18:51:26.000000000 -0100 +++ agpgart/drivers/char/agp/frontend.c 2002-12-13 18:16:25.000000000 -0100 @@ -1062,9 +1062,9 @@ static struct file_operations agp_fops = static struct miscdevice agp_miscdev = { - AGPGART_MINOR, - "agpgart", - &agp_fops + .minor = AGPGART_MINOR, + .name = "agpgart", + .fops = &agp_fops }; int __init agp_frontend_initialize(void) @@ -1079,7 +1079,7 @@ int __init agp_frontend_initialize(void) return 0; } -void __exit agp_frontend_cleanup(void) +void agp_frontend_cleanup(void) { misc_deregister(&agp_miscdev); } diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/generic-3.0.c agpgart/drivers/char/agp/generic-3.0.c --- bk-linus/drivers/char/agp/generic-3.0.c 1969-12-31 
23:00:00.000000000 -0100 +++ agpgart/drivers/char/agp/generic-3.0.c 2002-12-13 18:16:25.000000000 -0100 @@ -0,0 +1,556 @@ +#include +#include + + +//#include +//#include +//#include +#include + +#include "agp.h" + +/* Generic AGP 3.0 enabling routines */ + +struct agp_3_0_dev { + struct list_head list; + u8 capndx; + u32 maxbw; + struct pci_dev *dev; +}; + +static int agp_3_0_dev_list_insert(struct list_head *head, struct list_head *new) +{ + struct agp_3_0_dev *cur, *n = list_entry(new, struct agp_3_0_dev, list); + struct list_head *pos; + + list_for_each(pos, head) { + cur = list_entry(pos, struct agp_3_0_dev, list); + if(cur->maxbw > n->maxbw) + break; + } + list_add_tail(new, pos); + + return 0; +} + +static int agp_3_0_dev_list_sort(struct agp_3_0_dev *list, unsigned int ndevs) +{ + struct agp_3_0_dev *cur; + struct pci_dev *dev; + struct list_head *pos, *tmp, *head = &list->list, *start = head->next; + u32 nistat; + + INIT_LIST_HEAD(head); + + for(pos = start; pos != head;) { + cur = list_entry(pos, struct agp_3_0_dev, list); + dev = cur->dev; + + pci_read_config_dword(dev, cur->capndx + 0x0c, &nistat); + cur->maxbw = (nistat >> 16) & 0xff; + + tmp = pos; + pos = pos->next; + agp_3_0_dev_list_insert(head, tmp); + } + return 0; +} + +/* + * Initialize all isochronous transfer parameters for an AGP 3.0 + * node (i.e. a host bridge in combination with the adapters + * lying behind it...) + */ + +static int agp_3_0_isochronous_node_enable(struct agp_3_0_dev *dev_list, unsigned int ndevs) +{ + /* + * Convenience structure to make the calculations clearer + * here. The field names come straight from the AGP 3.0 spec. 
+ */ + struct isoch_data { + u32 maxbw; + u32 n; + u32 y; + u32 l; + u32 rq; + struct agp_3_0_dev *dev; + }; + + struct pci_dev *td = agp_bridge.dev, *dev; + struct list_head *head = &dev_list->list, *pos; + struct agp_3_0_dev *cur; + struct isoch_data *master, target; + unsigned int cdev = 0; + u32 mnistat, tnistat, tstatus, mcmd; + u16 tnicmd, mnicmd; + u8 mcapndx; + u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async; + u32 step, rem, rem_isoch, rem_async; + int ret = 0; + + /* + * We'll work with an array of isoch_data's (one for each + * device in dev_list) throughout this function. + */ + if((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) { + ret = -ENOMEM; + goto get_out; + } + + /* + * Sort the device list by maxbw. We need to do this because the + * spec suggests that the devices with the smallest requirements + * have their resources allocated first, with all remaining resources + * falling to the device with the largest requirement. + * + * We don't exactly do this, we divide target resources by ndevs + * and split them amongst the AGP 3.0 devices. The remainder of such + * division operations are dropped on the last device, sort of like + * the spec mentions it should be done. + * + * We can't do this sort when we initially construct the dev_list + * because we don't know until this function whether isochronous + * transfers are enabled and consequently whether maxbw will mean + * anything. + */ + if((ret = agp_3_0_dev_list_sort(dev_list, ndevs)) != 0) + goto free_and_exit; + + pci_read_config_dword(td, agp_bridge.capndx + 0x0c, &tnistat); + pci_read_config_dword(td, agp_bridge.capndx + 0x04, &tstatus); + + /* Extract power-on defaults from the target */ + target.maxbw = (tnistat >> 16) & 0xff; + target.n = (tnistat >> 8) & 0xff; + target.y = (tnistat >> 6) & 0x3; + target.l = (tnistat >> 3) & 0x7; + target.rq = (tstatus >> 24) & 0xff; + + y_max = target.y; + + /* + * Extract power-on defaults for each device in dev_list. 
Along + * the way, calculate the total isochronous bandwidth required + * by these devices and the largest requested payload size. + */ + list_for_each(pos, head) { + cur = list_entry(pos, struct agp_3_0_dev, list); + dev = cur->dev; + + mcapndx = cur->capndx; + + pci_read_config_dword(dev, cur->capndx + 0x0c, &mnistat); + + master[cdev].maxbw = (mnistat >> 16) & 0xff; + master[cdev].n = (mnistat >> 8) & 0xff; + master[cdev].y = (mnistat >> 6) & 0x3; + master[cdev].dev = cur; + + tot_bw += master[cdev].maxbw; + y_max = max(y_max, master[cdev].y); + + cdev++; + } + + /* Check if this configuration has any chance of working */ + if(tot_bw > target.maxbw) { + printk(KERN_ERR PFX "isochronous bandwidth required " + "by AGP 3.0 devices exceeds that which is supported by " + "the AGP 3.0 bridge!\n"); + ret = -ENODEV; + goto free_and_exit; + } + + target.y = y_max; + + /* + * Write the calculated payload size into the target's NICMD + * register. Doing this directly effects the ISOCH_N value + * in the target's NISTAT register, so we need to do this now + * to get an accurate value for ISOCH_N later. + */ + pci_read_config_word(td, agp_bridge.capndx + 0x20, &tnicmd); + tnicmd &= ~(0x3 << 6); + tnicmd |= target.y << 6; + pci_write_config_word(td, agp_bridge.capndx + 0x20, tnicmd); + + /* Reread the target's ISOCH_N */ + pci_read_config_dword(td, agp_bridge.capndx + 0x0c, &tnistat); + target.n = (tnistat >> 8) & 0xff; + + /* Calculate the minimum ISOCH_N needed by each master */ + for(cdev = 0; cdev < ndevs; cdev++) { + master[cdev].y = target.y; + master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1); + + tot_n += master[cdev].n; + } + + /* Exit if the minimal ISOCH_N allocation among the masters is more + * than the target can handle. 
*/ + if(tot_n > target.n) { + printk(KERN_ERR PFX "number of isochronous " + "transactions per period required by AGP 3.0 devices " + "exceeds that which is supported by the AGP 3.0 " + "bridge!\n"); + ret = -ENODEV; + goto free_and_exit; + } + + /* Calculate left over ISOCH_N capability in the target. We'll give + * this to the hungriest device (as per the spec) */ + rem = target.n - tot_n; + + /* + * Calculate the minimum isochronous RQ depth needed by each master. + * Along the way, distribute the extra ISOCH_N capability calculated + * above. + */ + for(cdev = 0; cdev < ndevs; cdev++) { + /* + * This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y + * byte isochronous writes will be broken into 64B pieces. + * This means we need to budget more RQ depth to account for + * these kind of writes (each isochronous write is actually + * many writes on the AGP bus). + */ + master[cdev].rq = master[cdev].n; + if(master[cdev].y > 0x1) { + master[cdev].rq *= (1 << (master[cdev].y - 1)); + } + + tot_rq += master[cdev].rq; + + if(cdev == ndevs - 1) + master[cdev].n += rem; + } + + /* Figure the number of isochronous and asynchronous RQ slots the + * target is providing. */ + rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n; + rq_async = target.rq - rq_isoch; + + /* Exit if the minimal RQ needs of the masters exceeds what the target + * can provide. */ + if(tot_rq > rq_isoch) { + printk(KERN_ERR PFX "number of request queue slots " + "required by the isochronous bandwidth requested by " + "AGP 3.0 devices exceeds the number provided by the " + "AGP 3.0 bridge!\n"); + ret = -ENODEV; + goto free_and_exit; + } + + /* Calculate asynchronous RQ capability in the target (per master) as + * well as the total number of leftover isochronous RQ slots. 
*/ + step = rq_async / ndevs; + rem_async = step + (rq_async % ndevs); + rem_isoch = rq_isoch - tot_rq; + + /* Distribute the extra RQ slots calculated above and write our + * isochronous settings out to the actual devices. */ + for(cdev = 0; cdev < ndevs; cdev++) { + cur = master[cdev].dev; + dev = cur->dev; + + mcapndx = cur->capndx; + + master[cdev].rq += (cdev == ndevs - 1) + ? (rem_async + rem_isoch) : step; + + pci_read_config_word(dev, cur->capndx + 0x20, &mnicmd); + pci_read_config_dword(dev, cur->capndx + 0x08, &mcmd); + + mnicmd &= ~(0xff << 8); + mnicmd &= ~(0x3 << 6); + mcmd &= ~(0xff << 24); + + mnicmd |= master[cdev].n << 8; + mnicmd |= master[cdev].y << 6; + mcmd |= master[cdev].rq << 24; + + pci_write_config_dword(dev, cur->capndx + 0x08, mcmd); + pci_write_config_word(dev, cur->capndx + 0x20, mnicmd); + } + +free_and_exit: + kfree(master); + +get_out: + return ret; +} + +/* + * This function basically allocates request queue slots among the + * AGP 3.0 systems in nonisochronous nodes. The algorithm is + * pretty stupid, divide the total number of RQ slots provided by the + * target by ndevs. Distribute this many slots to each AGP 3.0 device, + * giving any left over slots to the last device in dev_list. + */ +static int agp_3_0_nonisochronous_node_enable(struct agp_3_0_dev *dev_list, unsigned int ndevs) +{ + struct agp_3_0_dev *cur; + struct list_head *head = &dev_list->list, *pos; + u32 tstatus, mcmd; + u32 trq, mrq, rem; + unsigned int cdev = 0; + + pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 0x04, &tstatus); + + trq = (tstatus >> 24) & 0xff; + mrq = trq / ndevs; + + rem = mrq + (trq % ndevs); + + for(pos = head->next; cdev < ndevs; cdev++, pos = pos->next) { + cur = list_entry(pos, struct agp_3_0_dev, list); + + pci_read_config_dword(cur->dev, cur->capndx + 0x08, &mcmd); + mcmd &= ~(0xff << 24); + mcmd |= ((cdev == ndevs - 1) ? 
rem : mrq) << 24; + pci_write_config_dword(cur->dev, cur->capndx + 0x08, mcmd); + } + + return 0; +} + +/* + * Fully configure and enable an AGP 3.0 host bridge and all the devices + * lying behind it. + */ +static int agp_3_0_node_enable(u32 mode, u32 minor) +{ + struct pci_dev *td = agp_bridge.dev, *dev; + u8 bus_num, mcapndx; + u32 isoch, arqsz, cal_cycle, tmp, rate; + u32 tstatus, tcmd, mcmd, mstatus, ncapid; + u32 mmajor, mminor; + u16 mpstat; + struct agp_3_0_dev *dev_list, *cur; + struct list_head *head, *pos; + unsigned int ndevs = 0; + int ret = 0; + + /* + * Allocate a head for our AGP 3.0 device list (multiple AGP 3.0 + * devices are allowed behind a single bridge). + */ + if((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) { + ret = -ENOMEM; + goto get_out; + } + head = &dev_list->list; + INIT_LIST_HEAD(head); + + /* + * Find all the devices on this bridge's secondary bus and add them + * to dev_list. + */ + pci_read_config_byte(td, PCI_SECONDARY_BUS, &bus_num); + pci_for_each_dev(dev) { + if(dev->bus->number == bus_num) { + if((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) { + ret = -ENOMEM; + goto free_and_exit; + } + + cur->dev = dev; + + pos = &cur->list; + list_add(pos, head); + ndevs++; + } + } + + /* Extract some power-on defaults from the target */ + pci_read_config_dword(td, agp_bridge.capndx + 0x04, &tstatus); + isoch = (tstatus >> 17) & 0x1; + arqsz = (tstatus >> 13) & 0x7; + cal_cycle = (tstatus >> 10) & 0x7; + rate = tstatus & 0x7; + + /* + * Take an initial pass through the devices lying behind our host + * bridge. Make sure each one is actually an AGP 3.0 device, otherwise + * exit with an error message. Along the way store the AGP 3.0 + * cap_ptr for each device, the minimum supported cal_cycle, and the + * minimum supported data rate. 
+ */ + list_for_each(pos, head) { + cur = list_entry(pos, struct agp_3_0_dev, list); + dev = cur->dev; + + pci_read_config_word(dev, PCI_STATUS, &mpstat); + if((mpstat & PCI_STATUS_CAP_LIST) == 0) + continue; + + pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx); + if (mcapndx != 0x00) { + do { + pci_read_config_dword(dev, mcapndx, &ncapid); + if ((ncapid & 0xff) != 0x02) + mcapndx = (ncapid >> 8) & 0xff; + } + while (((ncapid & 0xff) != 0x02) && (mcapndx != 0x00)); + } + + if(mcapndx == 0) { + printk(KERN_ERR PFX "woah! Non-AGP device " + "found on the secondary bus of an AGP 3.0 " + "bridge!\n"); + ret = -ENODEV; + goto free_and_exit; + } + + mmajor = (ncapid >> 20) & 0xf; + mminor = (ncapid >> 16) & 0xf; + + if(mmajor < 3) { + printk(KERN_ERR PFX "woah! AGP 2.0 device " + "found on the secondary bus of an AGP 3.0 " + "bridge operating with AGP 3.0 electricals!\n"); + ret = -ENODEV; + goto free_and_exit; + } + + cur->capndx = mcapndx; + + pci_read_config_dword(dev, cur->capndx + 0x04, &mstatus); + + if(((mstatus >> 3) & 0x1) == 0) { + printk(KERN_ERR PFX "woah! AGP 3.0 device " + "not operating in AGP 3.0 mode found on the " + "secondary bus of an AGP 3.0 bridge operating " + "with AGP 3.0 electricals!\n"); + ret = -ENODEV; + goto free_and_exit; + } + + tmp = (mstatus >> 10) & 0x7; + cal_cycle = min(cal_cycle, tmp); + + /* figure the lesser rate */ + tmp = mstatus & 0x7; + if(tmp < rate) + rate = tmp; + + } + + /* Turn rate into something we can actually write out to AGPCMD */ + switch(rate) { + case 0x1: + case 0x2: + break; + case 0x3: + rate = 0x2; + break; + default: + printk(KERN_ERR PFX "woah! Bogus AGP rate " + "value found advertised behind an AGP 3.0 " + "bridge!\n"); + ret = -ENODEV; + goto free_and_exit; + } + + /* + * Call functions to divide target resources amongst the AGP 3.0 + * masters. This process is dramatically different depending on + * whether isochronous transfers are supported. 
+ */ + if(isoch != 0) { + if((ret = agp_3_0_isochronous_node_enable(dev_list, + ndevs)) != 0) + goto free_and_exit; + } else { + if((ret = agp_3_0_nonisochronous_node_enable(dev_list, + ndevs)) != 0) + goto free_and_exit; + } + + /* + * Set the calculated minimum supported cal_cycle and minimum + * supported transfer rate in the target's AGPCMD register. + * Also set the AGP_ENABLE bit, effectively 'turning on' the + * target (this has to be done _before_ turning on the masters). + */ + pci_read_config_dword(td, agp_bridge.capndx + 0x08, &tcmd); + + tcmd &= ~(0x7 << 10); + tcmd &= ~0x7; + + tcmd |= cal_cycle << 10; + tcmd |= 0x1 << 8; + tcmd |= rate; + + pci_write_config_dword(td, agp_bridge.capndx + 0x08, tcmd); + + /* + * Set the target's advertised arqsz value, the minimum supported + * transfer rate, and the AGP_ENABLE bit in each master's AGPCMD + * register. + */ + list_for_each(pos, head) { + cur = list_entry(pos, struct agp_3_0_dev, list); + dev = cur->dev; + + mcapndx = cur->capndx; + + pci_read_config_dword(dev, cur->capndx + 0x08, &mcmd); + + mcmd &= ~(0x7 << 13); + mcmd &= ~0x7; + + mcmd |= arqsz << 13; + mcmd |= 0x1 << 8; + mcmd |= rate; + + pci_write_config_dword(dev, cur->capndx + 0x08, mcmd); + } + +free_and_exit: + /* Be sure to free the dev_list */ + for(pos = head->next; pos != head;) { + cur = list_entry(pos, struct agp_3_0_dev, list); + + pos = pos->next; + kfree(cur); + } + kfree(dev_list); + +get_out: + return ret; +} + +/* + * Entry point to AGP 3.0 host bridge init. Check to see if we + * have an AGP 3.0 device operating in 3.0 mode. Call + * agp_3_0_node_enable or agp_generic_agp_enable if we don't + * (AGP 3.0 devices are required to operate as AGP 2.0 devices + * when not using 3.0 electricals. 
+ */ +void agp_generic_agp_3_0_enable(u32 mode) +{ + u32 ncapid, major, minor, agp_3_0; + + pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx, &ncapid); + + major = (ncapid >> 20) & 0xf; + minor = (ncapid >> 16) & 0xf; + + printk(KERN_INFO PFX "Found an AGP %d.%d compliant device.\n", + major, minor); + + if(major >= 3) { + pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 0x4, &agp_3_0); + /* + * Check to see if we are operating in 3.0 mode + */ + if((agp_3_0 >> 3) & 0x1) { + agp_3_0_node_enable(mode, minor); + return; + } + } + agp_generic_agp_enable(mode); +} + diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/generic.c agpgart/drivers/char/agp/generic.c --- bk-linus/drivers/char/agp/generic.c 2002-12-13 18:51:26.000000000 -0100 +++ agpgart/drivers/char/agp/generic.c 2002-12-13 15:10:22.000000000 -0100 @@ -469,7 +469,7 @@ int agp_generic_create_gatt_table(void) for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) SetPageReserved(page); - agp_bridge.gatt_table_real = (u32 *) table; + agp_bridge.gatt_table_real = (unsigned long *) table; agp_gatt_table = (void *)table; CACHE_FLUSH(); agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), @@ -693,7 +693,23 @@ void agp_enable(u32 mode) EXPORT_SYMBOL(agp_free_memory); EXPORT_SYMBOL(agp_allocate_memory); EXPORT_SYMBOL(agp_copy_info); +EXPORT_SYMBOL(agp_create_memory); EXPORT_SYMBOL(agp_bind_memory); EXPORT_SYMBOL(agp_unbind_memory); +EXPORT_SYMBOL(agp_free_key); EXPORT_SYMBOL(agp_enable); +EXPORT_SYMBOL(agp_bridge); + +EXPORT_SYMBOL(agp_generic_alloc_page); +EXPORT_SYMBOL(agp_generic_destroy_page); +EXPORT_SYMBOL(agp_generic_suspend); +EXPORT_SYMBOL(agp_generic_resume); +EXPORT_SYMBOL(agp_generic_agp_enable); +EXPORT_SYMBOL(agp_generic_create_gatt_table); +EXPORT_SYMBOL(agp_generic_free_gatt_table); +EXPORT_SYMBOL(agp_generic_insert_memory); +EXPORT_SYMBOL(agp_generic_remove_memory); +EXPORT_SYMBOL(agp_generic_alloc_by_type); 
+EXPORT_SYMBOL(agp_generic_free_by_type); +EXPORT_SYMBOL(global_cache_flush); diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/hp-agp.c agpgart/drivers/char/agp/hp-agp.c --- bk-linus/drivers/char/agp/hp-agp.c 2002-12-13 18:51:26.000000000 -0100 +++ agpgart/drivers/char/agp/hp-agp.c 2002-12-13 14:43:16.000000000 -0100 @@ -18,8 +18,7 @@ #define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL #define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL -#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> \ - hp_private.io_tlb_shift) +#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift) static struct aper_size_info_fixed hp_zx1_sizes[] = { @@ -330,12 +329,7 @@ static unsigned long hp_zx1_mask_memory( return HP_ZX1_PDIR_VALID_BIT | addr; } -static unsigned long hp_zx1_unmask_memory(unsigned long addr) -{ - return addr & ~(HP_ZX1_PDIR_VALID_BIT); -} - -int __init hp_zx1_setup (struct pci_dev *pdev) +int __init hp_zx1_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge.masks = hp_zx1_masks; agp_bridge.num_of_masks = 1; @@ -347,7 +341,6 @@ int __init hp_zx1_setup (struct pci_dev agp_bridge.cleanup = hp_zx1_cleanup; agp_bridge.tlb_flush = hp_zx1_tlbflush; agp_bridge.mask_memory = hp_zx1_mask_memory; - agp_bridge.unmask_memory = hp_zx1_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = hp_zx1_create_gatt_table; @@ -375,8 +368,6 @@ static int __init agp_find_supported_dev return hp_zx1_setup(dev); } return -ENODEV; -} - static int agp_hp_probe (struct pci_dev *dev, const struct pci_device_id *ent) { diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/i460-agp.c agpgart/drivers/char/agp/i460-agp.c --- bk-linus/drivers/char/agp/i460-agp.c 2002-12-13 18:51:26.000000000 -0100 +++ agpgart/drivers/char/agp/i460-agp.c 2002-12-13 14:46:25.000000000 -0100 @@ -1,47 +1,95 @@ /* - * FIXME: Nothing ever calls 
this stuff! + * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of + * the "Intel 460GTX Chipset Software Developer's Manual": + * http://developer.intel.com/design/itanium/downloads/24870401s.htm + */ +/* + * 460GX support by Chris Ahna + * Clean up & simplification by David Mosberger-Tang */ - #include #include #include #include -#include "agp.h" -/* BIOS configures the chipset so that one of two apbase registers are used */ -static u8 intel_i460_dynamic_apbase = 0x10; +#include "agp.h" -/* 460 supports multiple GART page sizes, so GART pageshift is dynamic */ -static u8 intel_i460_pageshift = 12; -static u32 intel_i460_pagesize; - -/* Keep track of which is larger, chipset or kernel page size. */ -static u32 intel_i460_cpk = 1; - -/* Structure for tracking partial use of 4MB GART pages */ -static u32 **i460_pg_detail = NULL; -static u32 *i460_pg_count = NULL; +/* + * The i460 can operate with large (4MB) pages, but there is no sane way to support this + * within the current kernel/DRM environment, so we disable the relevant code for now. + * See also comments in ia64_alloc_page()... 
+ */ +#define I460_LARGE_IO_PAGES 0 -#define I460_CPAGES_PER_KPAGE (PAGE_SIZE >> intel_i460_pageshift) -#define I460_KPAGES_PER_CPAGE ((1 << intel_i460_pageshift) >> PAGE_SHIFT) +#if I460_LARGE_IO_PAGES +# define I460_IO_PAGE_SHIFT i460.io_page_shift +#else +# define I460_IO_PAGE_SHIFT 12 +#endif +#define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT) +#define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT)) #define I460_SRAM_IO_DISABLE (1 << 4) #define I460_BAPBASE_ENABLE (1 << 3) #define I460_AGPSIZ_MASK 0x7 #define I460_4M_PS (1 << 1) -#define log2(x) ffz(~(x)) +/* Control bits for Out-Of-GART coherency and Burst Write Combining */ +#define I460_GXBCTL_OOG (1UL << 0) +#define I460_GXBCTL_BWC (1UL << 2) + +/* + * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the + * gatt_table and gatt_table_real pointers a "void *"... + */ +#define RD_GATT(index) readl((u32 *) i460.gatt + (index)) +#define WR_GATT(index, val) writel((val), (u32 *) i460.gatt + (index)) +/* + * The 460 spec says we have to read the last location written to make sure that all + * writes have taken effect + */ +#define WR_FLUSH_GATT(index) RD_GATT(index) + +#define log2(x) ffz(~(x)) + +static struct { + void *gatt; /* ioremap'd GATT area */ + + /* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */ + u8 io_page_shift; + + /* BIOS configures chipset to one of 2 possible apbase values: */ + u8 dynamic_apbase; -static inline void intel_i460_read_back (volatile u32 *entry) + /* structure for tracking partial use of 4MB GART pages: */ + struct lp_desc { + unsigned long *alloced_map; /* bitmap of kernel-pages in use */ + int refcount; /* number of kernel pages using the large page */ + u64 paddr; /* physical address of large page */ + } *lp_desc; +} i460; + +static const struct aper_size_info_8 i460_sizes[3] = { /* - * The 460 spec says we have to read the last location written to - * make sure that all writes have taken 
effect + * The 32GB aperture is only available with a 4M GART page size. Due to the + * dynamic GART page size, we can't figure out page_order or num_entries until + * runtime. */ - *entry; -} + {32768, 0, 0, 4}, + {1024, 0, 0, 2}, + {256, 0, 0, 1} +}; -static int intel_i460_fetch_size(void) +static struct gatt_mask i460_masks[] = +{ + { + .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, + .type = 0 + } +}; + +static int i460_fetch_size (void) { int i; u8 temp; @@ -49,8 +97,15 @@ static int intel_i460_fetch_size(void) /* Determine the GART page size */ pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp); - intel_i460_pageshift = (temp & I460_4M_PS) ? 22 : 12; - intel_i460_pagesize = 1UL << intel_i460_pageshift; + i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12; + pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift); + + if (i460.io_page_shift != I460_IO_PAGE_SHIFT) { + printk(KERN_ERR PFX + "I/O (GART) page-size %ZuKB doesn't match expected size %ZuKB\n", + 1UL << (i460.io_page_shift - 10), 1UL << (I460_IO_PAGE_SHIFT)); + return 0; + } values = A_SIZE_8(agp_bridge.aperture_sizes); @@ -64,16 +119,16 @@ static int intel_i460_fetch_size(void) } /* Make sure we don't try to create an 2 ^ 23 entry GATT */ - if ((intel_i460_pageshift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) { + if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) { printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n"); return 0; } /* Determine the proper APBASE register */ if (temp & I460_BAPBASE_ENABLE) - intel_i460_dynamic_apbase = INTEL_I460_BAPBASE; + i460.dynamic_apbase = INTEL_I460_BAPBASE; else - intel_i460_dynamic_apbase = INTEL_I460_APBASE; + i460.dynamic_apbase = INTEL_I460_APBASE; for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { /* @@ -81,7 +136,7 @@ static int intel_i460_fetch_size(void) * the define aperture sizes. Take care not to shift off the end of * values[i].size. 
*/ - values[i].num_entries = (values[i].size << 8) >> (intel_i460_pageshift - 12); + values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12); values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT); } @@ -98,7 +153,7 @@ static int intel_i460_fetch_size(void) } /* There isn't anything to do here since 460 has no GART TLB. */ -static void intel_i460_tlb_flush(agp_memory * mem) +static void i460_tlb_flush (agp_memory * mem) { return; } @@ -107,7 +162,7 @@ static void intel_i460_tlb_flush(agp_mem * This utility function is needed to prevent corruption of the control bits * which are stored along with the aperture size in 460's AGPSIZ register */ -static void intel_i460_write_agpsiz(u8 size_value) +static void i460_write_agpsiz (u8 size_value) { u8 temp; @@ -116,47 +171,39 @@ static void intel_i460_write_agpsiz(u8 s ((temp & ~I460_AGPSIZ_MASK) | size_value)); } -static void intel_i460_cleanup(void) +static void i460_cleanup (void) { struct aper_size_info_8 *previous_size; previous_size = A_SIZE_8(agp_bridge.previous_size); - intel_i460_write_agpsiz(previous_size->size_value); + i460_write_agpsiz(previous_size->size_value); - if (intel_i460_cpk == 0) { - vfree(i460_pg_detail); - vfree(i460_pg_count); - } + if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) + kfree(i460.lp_desc); } - -/* Control bits for Out-Of-GART coherency and Burst Write Combining */ -#define I460_GXBCTL_OOG (1UL << 0) -#define I460_GXBCTL_BWC (1UL << 2) - -static int intel_i460_configure(void) +static int i460_configure (void) { union { u32 small[2]; u64 large; } temp; + size_t size; u8 scratch; - int i; - struct aper_size_info_8 *current_size; temp.large = 0; current_size = A_SIZE_8(agp_bridge.current_size); - intel_i460_write_agpsiz(current_size->size_value); + i460_write_agpsiz(current_size->size_value); /* * Do the necessary rigmarole to read all eight bytes of APBASE. * This has to be done since the AGP aperture can be above 4GB on * 460 based systems. 
*/ - pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase, &(temp.small[0])); - pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase + 4, &(temp.small[1])); + pci_read_config_dword(agp_bridge.dev, i460.dynamic_apbase, &(temp.small[0])); + pci_read_config_dword(agp_bridge.dev, i460.dynamic_apbase + 4, &(temp.small[1])); /* Clear BAR control bits */ agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1); @@ -166,403 +213,347 @@ static int intel_i460_configure(void) (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC); /* - * Initialize partial allocation trackers if a GART page is bigger than - * a kernel page. + * Initialize partial allocation trackers if a GART page is bigger than a kernel + * page. */ - if (I460_CPAGES_PER_KPAGE >= 1) { - intel_i460_cpk = 1; - } else { - intel_i460_cpk = 0; - - i460_pg_detail = vmalloc(sizeof(*i460_pg_detail) * current_size->num_entries); - i460_pg_count = vmalloc(sizeof(*i460_pg_count) * current_size->num_entries); - - for (i = 0; i < current_size->num_entries; i++) { - i460_pg_count[i] = 0; - i460_pg_detail[i] = NULL; - } + if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) { + size = current_size->num_entries * sizeof(i460.lp_desc[0]); + i460.lp_desc = kmalloc(size, GFP_KERNEL); + if (!i460.lp_desc) + return -ENOMEM; + memset(i460.lp_desc, 0, size); } return 0; } -static int intel_i460_create_gatt_table(void) +static int i460_create_gatt_table (void) { - char *table; - int i; - int page_order; - int num_entries; + int page_order, num_entries, i; void *temp; /* - * Load up the fixed address of the GART SRAMS which hold our - * GATT table. + * Load up the fixed address of the GART SRAMS which hold our GATT table. 
*/ - table = (char *) __va(INTEL_I460_ATTBASE); - temp = agp_bridge.current_size; page_order = A_SIZE_8(temp)->page_order; num_entries = A_SIZE_8(temp)->num_entries; - agp_bridge.gatt_table_real = (u32 *) table; - agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), - (PAGE_SIZE * (1 << page_order))); - agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); - - for (i = 0; i < num_entries; i++) { - agp_bridge.gatt_table[i] = 0; - } + i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order); - intel_i460_read_back(agp_bridge.gatt_table + i - 1); + /* These are no good, the should be removed from the agp_bridge strucure... */ + agp_bridge.gatt_table_real = NULL; + agp_bridge.gatt_table = NULL; + agp_bridge.gatt_bus_addr = 0; + + for (i = 0; i < num_entries; ++i) + WR_GATT(i, 0); + WR_FLUSH_GATT(i - 1); return 0; } -static int intel_i460_free_gatt_table(void) +static int i460_free_gatt_table (void) { - int num_entries; - int i; + int num_entries, i; void *temp; temp = agp_bridge.current_size; num_entries = A_SIZE_8(temp)->num_entries; - for (i = 0; i < num_entries; i++) { - agp_bridge.gatt_table[i] = 0; - } - - intel_i460_read_back(agp_bridge.gatt_table + i - 1); + for (i = 0; i < num_entries; ++i) + WR_GATT(i, 0); + WR_FLUSH_GATT(num_entries - 1); - iounmap(agp_bridge.gatt_table); + iounmap(i460.gatt); return 0; } -/* These functions are called when PAGE_SIZE exceeds the GART page size */ +/* + * The following functions are called when the I/O (GART) page size is smaller than + * PAGE_SIZE. + */ -static int intel_i460_insert_memory_cpk(agp_memory * mem, off_t pg_start, int type) +static int i460_insert_memory_small_io_page (agp_memory *mem, off_t pg_start, int type) { + unsigned long paddr, io_pg_start, io_page_size; int i, j, k, num_entries; void *temp; - unsigned long paddr; - /* - * The rest of the kernel will compute page offsets in terms of - * PAGE_SIZE. 
- */ - pg_start = I460_CPAGES_PER_KPAGE * pg_start; + pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n", + mem, pg_start, type, mem->memory[0]); + + io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start; temp = agp_bridge.current_size; num_entries = A_SIZE_8(temp)->num_entries; - if ((pg_start + I460_CPAGES_PER_KPAGE * mem->page_count) > num_entries) { + if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) { printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); return -EINVAL; } - j = pg_start; - while (j < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count)) { - if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + j = io_pg_start; + while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) { + if (!PGE_EMPTY(RD_GATT(j))) { + pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n", + j, RD_GATT(j)); return -EBUSY; } j++; } -#if 0 - /* not necessary since 460 GART is operated in coherent mode... */ - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } -#endif - - for (i = 0, j = pg_start; i < mem->page_count; i++) { + io_page_size = 1UL << I460_IO_PAGE_SHIFT; + for (i = 0, j = io_pg_start; i < mem->page_count; i++) { paddr = mem->memory[i]; - for (k = 0; k < I460_CPAGES_PER_KPAGE; k++, j++, paddr += intel_i460_pagesize) - agp_bridge.gatt_table[j] = (u32) agp_bridge.mask_memory(paddr, mem->type); + for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size) + WR_GATT(j, agp_bridge.mask_memory(paddr, mem->type)); } - - intel_i460_read_back(agp_bridge.gatt_table + j - 1); + WR_FLUSH_GATT(j - 1); return 0; } -static int intel_i460_remove_memory_cpk(agp_memory * mem, off_t pg_start, int type) +static int i460_remove_memory_small_io_page(agp_memory * mem, off_t pg_start, int type) { int i; - pg_start = I460_CPAGES_PER_KPAGE * pg_start; + pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n", + mem, pg_start, type); - for (i = 
pg_start; i < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count); i++) - agp_bridge.gatt_table[i] = 0; + pg_start = I460_IOPAGES_PER_KPAGE * pg_start; - intel_i460_read_back(agp_bridge.gatt_table + i - 1); + for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++) + WR_GATT(i, 0); + WR_FLUSH_GATT(i - 1); return 0; } +#if I460_LARGE_IO_PAGES + /* - * These functions are called when the GART page size exceeds PAGE_SIZE. + * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE. * - * This situation is interesting since AGP memory allocations that are - * smaller than a single GART page are possible. The structures i460_pg_count - * and i460_pg_detail track partial allocation of the large GART pages to - * work around this issue. + * This situation is interesting since AGP memory allocations that are smaller than a + * single GART page are possible. The i460.lp_desc array tracks partial allocation of the + * large GART pages to work around this issue. * - * i460_pg_count[pg_num] tracks the number of kernel pages in use within - * GART page pg_num. i460_pg_detail[pg_num] is an array containing a - * psuedo-GART entry for each of the aforementioned kernel pages. The whole - * of i460_pg_detail is equivalent to a giant GATT with page size equal to - * that of the kernel. + * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page + * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and + * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated). 
*/ -static void *intel_i460_alloc_large_page(int pg_num) +static int i460_alloc_large_page (struct lp_desc *lp) { - int i; - void *bp, *bp_end; - struct page *page; - - i460_pg_detail[pg_num] = (void *) vmalloc(sizeof(u32) * I460_KPAGES_PER_CPAGE); - if (i460_pg_detail[pg_num] == NULL) { - printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); - return NULL; - } - - for (i = 0; i < I460_KPAGES_PER_CPAGE; i++) - i460_pg_detail[pg_num][i] = 0; + unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT; + size_t map_size; + void *lpage; - bp = (void *) __get_free_pages(GFP_KERNEL, intel_i460_pageshift - PAGE_SHIFT); - if (bp == NULL) { + lpage = (void *) __get_free_pages(GFP_KERNEL, order); + if (!lpage) { printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n"); - return NULL; + return -ENOMEM; } - bp_end = bp + ((PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))) - 1); - - for (page = virt_to_page(bp); page <= virt_to_page(bp_end); page++) { - atomic_inc(&agp_bridge.current_memory_agp); + map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8; + lp->alloced_map = kmalloc(map_size, GFP_KERNEL); + if (!lp->alloced_map) { + free_pages((unsigned long) lpage, order); + printk(KERN_ERR PFX "Out of memory, we're in trouble...\n"); + return -ENOMEM; } - return bp; + memset(lp->alloced_map, 0, map_size); + + lp->paddr = virt_to_phys(lpage); + lp->refcount = 0; + atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge.current_memory_agp); + return 0; } -static void intel_i460_free_large_page(int pg_num, unsigned long addr) +static void i460_free_large_page (struct lp_desc *lp) { - struct page *page; - void *bp, *bp_end; - - bp = (void *) __va(addr); - bp_end = bp + (PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))); + kfree(lp->alloced_map); + lp->alloced_map = NULL; - vfree(i460_pg_detail[pg_num]); - i460_pg_detail[pg_num] = NULL; - - for (page = virt_to_page(bp); page < virt_to_page(bp_end); page++) { - atomic_dec(&agp_bridge.current_memory_agp); - 
} - - free_pages((unsigned long) bp, intel_i460_pageshift - PAGE_SHIFT); + free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT); + atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge.current_memory_agp); } -static int intel_i460_insert_memory_kpc(agp_memory * mem, off_t pg_start, int type) +static int i460_insert_memory_large_io_page (agp_memory * mem, off_t pg_start, int type) { - int i, pg, start_pg, end_pg, start_offset, end_offset, idx; - int num_entries; + int i, start_offset, end_offset, idx, pg, num_entries; + struct lp_desc *start, *end, *lp; void *temp; - unsigned long paddr; temp = agp_bridge.current_size; num_entries = A_SIZE_8(temp)->num_entries; /* Figure out what pg_start means in terms of our large GART pages */ - start_pg = pg_start / I460_KPAGES_PER_CPAGE; - start_offset = pg_start % I460_KPAGES_PER_CPAGE; - end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE; - end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE; + start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; + end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; + start_offset = pg_start % I460_KPAGES_PER_IOPAGE; + end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; - if (end_pg > num_entries) { + if (end > i460.lp_desc + num_entries) { printk(KERN_ERR PFX "Looks like we're out of AGP memory\n"); return -EINVAL; } /* Check if the requested region of the aperture is free */ - for (pg = start_pg; pg <= end_pg; pg++) { - /* Allocate new GART pages if necessary */ - if (i460_pg_detail[pg] == NULL) { - temp = intel_i460_alloc_large_page(pg); - if (temp == NULL) - return -ENOMEM; - agp_bridge.gatt_table[pg] = agp_bridge.mask_memory((unsigned long) temp, - 0); - intel_i460_read_back(agp_bridge.gatt_table + pg); - } + for (lp = start; lp <= end; ++lp) { + if (!lp->alloced_map) + continue; /* OK, the entire large page is available... */ - for (idx = ((pg == start_pg) ? 
start_offset : 0); - idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE); + for (idx = ((lp == start) ? start_offset : 0); + idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE); idx++) { - if (i460_pg_detail[pg][idx] != 0) + if (test_bit(idx, lp->alloced_map)) return -EBUSY; } } -#if 0 - /* not necessary since 460 GART is operated in coherent mode... */ - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } -#endif + for (lp = start, i = 0; lp <= end; ++lp) { + if (!lp->alloced_map) { + /* Allocate new GART pages... */ + if (i460_alloc_large_page(lp) < 0) + return -ENOMEM; + pg = lp - i460.lp_desc; + WR_GATT(pg, agp_bridge.mask_memory(lp->paddr, 0)); + WR_FLUSH_GATT(pg); + } - for (pg = start_pg, i = 0; pg <= end_pg; pg++) { - paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]); - for (idx = ((pg == start_pg) ? start_offset : 0); - idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE); + for (idx = ((lp == start) ? start_offset : 0); + idx < ((lp == end) ? 
(end_offset + 1) : I460_KPAGES_PER_IOPAGE); idx++, i++) { - mem->memory[i] = paddr + (idx * PAGE_SIZE); - i460_pg_detail[pg][idx] = agp_bridge.mask_memory(mem->memory[i], - mem->type); - i460_pg_count[pg]++; + mem->memory[i] = lp->paddr + idx*PAGE_SIZE; + __set_bit(idx, lp->alloced_map); + ++lp->refcount; } } - return 0; } -static int intel_i460_remove_memory_kpc(agp_memory * mem, off_t pg_start, int type) +static int i460_remove_memory_large_io_page (agp_memory * mem, off_t pg_start, int type) { - int i, pg, start_pg, end_pg, start_offset, end_offset, idx; - int num_entries; + int i, pg, start_offset, end_offset, idx, num_entries; + struct lp_desc *start, *end, *lp; void *temp; - unsigned long paddr; temp = agp_bridge.current_size; num_entries = A_SIZE_8(temp)->num_entries; /* Figure out what pg_start means in terms of our large GART pages */ - start_pg = pg_start / I460_KPAGES_PER_CPAGE; - start_offset = pg_start % I460_KPAGES_PER_CPAGE; - end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE; - end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE; - - for (i = 0, pg = start_pg; pg <= end_pg; pg++) { - for (idx = ((pg == start_pg) ? start_offset : 0); - idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE); - idx++, i++) + start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE]; + end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE]; + start_offset = pg_start % I460_KPAGES_PER_IOPAGE; + end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE; + + for (i = 0, lp = start; lp <= end; ++lp) { + for (idx = ((lp == start) ? start_offset : 0); + idx < ((lp == end) ? 
(end_offset + 1) : I460_KPAGES_PER_IOPAGE); + idx++, i++) { mem->memory[i] = 0; - i460_pg_detail[pg][idx] = 0; - i460_pg_count[pg]--; + __clear_bit(idx, lp->alloced_map); + --lp->refcount; } /* Free GART pages if they are unused */ - if (i460_pg_count[pg] == 0) { - paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]); - agp_bridge.gatt_table[pg] = agp_bridge.scratch_page; - intel_i460_read_back(agp_bridge.gatt_table + pg); - intel_i460_free_large_page(pg, paddr); + if (lp->refcount == 0) { + pg = lp - i460.lp_desc; + WR_GATT(pg, 0); + WR_FLUSH_GATT(pg); + i460_free_large_page(lp); } } return 0; } -/* Dummy routines to call the approriate {cpk,kpc} function */ +/* Wrapper routines to call the approriate {small_io_page,large_io_page} function */ -static int intel_i460_insert_memory(agp_memory * mem, off_t pg_start, int type) +static int i460_insert_memory (agp_memory * mem, off_t pg_start, int type) { - if (intel_i460_cpk) - return intel_i460_insert_memory_cpk(mem, pg_start, type); + if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) + return i460_insert_memory_small_io_page(mem, pg_start, type); else - return intel_i460_insert_memory_kpc(mem, pg_start, type); + return i460_insert_memory_large_io_page(mem, pg_start, type); } -static int intel_i460_remove_memory(agp_memory * mem, off_t pg_start, int type) +static int i460_remove_memory (agp_memory * mem, off_t pg_start, int type) { - if (intel_i460_cpk) - return intel_i460_remove_memory_cpk(mem, pg_start, type); + if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) + return i460_remove_memory_small_io_page(mem, pg_start, type); else - return intel_i460_remove_memory_kpc(mem, pg_start, type); + return i460_remove_memory_large_io_page(mem, pg_start, type); } /* - * If the kernel page size is smaller that the chipset page size, we don't - * want to allocate memory until we know where it is to be bound in the - * aperture (a multi-kernel-page alloc might fit inside of an already - * allocated GART page). 
Consequently, don't allocate or free anything - * if i460_cpk (meaning chipset pages per kernel page) isn't set. + * If the I/O (GART) page size is bigger than the kernel page size, we don't want to + * allocate memory until we know where it is to be bound in the aperture (a + * multi-kernel-page alloc might fit inside of an already allocated GART page). * - * Let's just hope nobody counts on the allocated AGP memory being there - * before bind time (I don't think current drivers do)... + * Let's just hope nobody counts on the allocated AGP memory being there before bind time + * (I don't think current drivers do)... */ -static void * intel_i460_alloc_page(void) +static void *i460_alloc_page (void) { - if (intel_i460_cpk) - return agp_generic_alloc_page(); + void *page; - /* Returning NULL would cause problems */ - /* AK: really dubious code. */ - return (void *)~0UL; + if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) + page = agp_generic_alloc_page(); + else + /* Returning NULL would cause problems */ + /* AK: really dubious code. 
*/ + page = (void *)~0UL; + return page; } -static void intel_i460_destroy_page(void *page) +static void i460_destroy_page (void *page) { - if (intel_i460_cpk) + if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) agp_generic_destroy_page(page); } -static struct gatt_mask intel_i460_masks[] = -{ - { - .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, - .type = 0 - } -}; +#endif /* I460_LARGE_IO_PAGES */ -static unsigned long intel_i460_mask_memory(unsigned long addr, int type) +static unsigned long i460_mask_memory (unsigned long addr, int type) { /* Make sure the returned address is a valid GATT entry */ return (agp_bridge.masks[0].mask - | (((addr & ~((1 << intel_i460_pageshift) - 1)) & 0xffffff000) >> 12)); + | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xffffff000) >> 12)); } -static unsigned long intel_i460_unmask_memory(unsigned long addr) -{ - /* Turn a GATT entry into a physical address */ - return ((addr & 0xffffff) << 12); -} - -static struct aper_size_info_8 intel_i460_sizes[3] = -{ - /* - * The 32GB aperture is only available with a 4M GART page size. - * Due to the dynamic GART page size, we can't figure out page_order - * or num_entries until runtime. 
- */ - {32768, 0, 0, 4}, - {1024, 0, 0, 2}, - {256, 0, 0, 1} -}; - int __init intel_i460_setup (struct pci_dev *pdev __attribute__((unused))) { - agp_bridge.masks = intel_i460_masks; - agp_bridge.aperture_sizes = (void *) intel_i460_sizes; + agp_bridge.num_of_masks = 1; + agp_bridge.masks = i460_masks; + agp_bridge.aperture_sizes = (void *) i460_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 3; agp_bridge.dev_private_data = NULL; agp_bridge.needs_scratch_page = FALSE; - agp_bridge.configure = intel_i460_configure; - agp_bridge.fetch_size = intel_i460_fetch_size; - agp_bridge.cleanup = intel_i460_cleanup; - agp_bridge.tlb_flush = intel_i460_tlb_flush; - agp_bridge.mask_memory = intel_i460_mask_memory; - agp_bridge.unmask_memory = intel_i460_unmask_memory; + agp_bridge.configure = i460_configure; + agp_bridge.fetch_size = i460_fetch_size; + agp_bridge.cleanup = i460_cleanup; + agp_bridge.tlb_flush = i460_tlb_flush; + agp_bridge.mask_memory = i460_mask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; - agp_bridge.create_gatt_table = intel_i460_create_gatt_table; - agp_bridge.free_gatt_table = intel_i460_free_gatt_table; - agp_bridge.insert_memory = intel_i460_insert_memory; - agp_bridge.remove_memory = intel_i460_remove_memory; + agp_bridge.create_gatt_table = i460_create_gatt_table; + agp_bridge.free_gatt_table = i460_free_gatt_table; +#if I460_LARGE_IO_PAGES + agp_bridge.insert_memory = i460_insert_memory; + agp_bridge.remove_memory = i460_remove_memory; + agp_bridge.agp_alloc_page = i460_alloc_page; + agp_bridge.agp_destroy_page = i460_destroy_page; +#else + agp_bridge.insert_memory = i460_insert_memory_small_io_page; + agp_bridge.remove_memory = i460_remove_memory_small_io_page; + agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; +#endif agp_bridge.alloc_by_type = agp_generic_alloc_by_type; agp_bridge.free_by_type = 
agp_generic_free_by_type; - agp_bridge.agp_alloc_page = intel_i460_alloc_page; - agp_bridge.agp_destroy_page = intel_i460_destroy_page; agp_bridge.suspend = agp_generic_suspend; agp_bridge.resume = agp_generic_resume; agp_bridge.cant_use_aperture = 1; @@ -619,6 +610,5 @@ static void __exit agp_i460_cleanup(void module_init(agp_i460_init); module_exit(agp_i460_cleanup); -MODULE_AUTHOR("Bjorn Helgaas "); +MODULE_AUTHOR("Chris Ahna "); MODULE_LICENSE("GPL and additional rights"); - diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/i7x05-agp.c agpgart/drivers/char/agp/i7x05-agp.c --- bk-linus/drivers/char/agp/i7x05-agp.c 1969-12-31 23:00:00.000000000 -0100 +++ agpgart/drivers/char/agp/i7x05-agp.c 2002-12-13 18:16:25.000000000 -0100 @@ -0,0 +1,227 @@ +#include +#include +#include +#include +#include "agp.h" + +static int intel_7505_fetch_size(void) +{ + int i; + u16 tmp; + aper_size_info_16 *values; + + /* + * For AGP 3.0 APSIZE is now 16 bits + */ + pci_read_config_word (agp_bridge.dev, INTEL_I7505_APSIZE, &tmp); + tmp = (tmp & 0xfff); + + values = A_SIZE_16(agp_bridge.aperture_sizes); + + for (i=0; i < agp_bridge.num_aperture_sizes; i++) { + if (tmp == values[i].size_value) { + agp_bridge.previous_size = agp_bridge.current_size = + (void *)(values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + return 0; +} + + +static void intel_7505_tlbflush(agp_memory *mem) +{ + u32 temp; + pci_read_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, &temp); + pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, temp & ~(1 << 7)); + pci_read_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, &temp); + pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, temp | (1 << 7)); +} + +static void intel_7505_cleanup(void) +{ + aper_size_info_16 *previous_size; + + previous_size = A_SIZE_16(agp_bridge.previous_size); + pci_write_config_byte(agp_bridge.dev, INTEL_I7505_APSIZE, + previous_size->size_value); +} + + +static int 
intel_7505_configure(void) +{ + u32 temp; + aper_size_info_16 *current_size; + + current_size = A_SIZE_16(agp_bridge.current_size); + + /* aperture size */ + pci_write_config_word(agp_bridge.dev, INTEL_I7505_APSIZE, + current_size->size_value); + + /* address to map to */ + pci_read_config_dword(agp_bridge.dev, INTEL_I7505_NAPBASELO, &temp); + agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); + + /* attbase */ + pci_write_config_dword(agp_bridge.dev, INTEL_I7505_ATTBASE, + agp_bridge.gatt_bus_addr); + + /* agpctrl */ + pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, 0x0000); + + /* clear error registers */ + pci_write_config_byte(agp_bridge.dev, INTEL_I7505_ERRSTS, 0xff); + return 0; +} + +static aper_size_info_16 intel_7505_sizes[7] = +{ + {256, 65536, 6, 0xf00}, + {128, 32768, 5, 0xf20}, + {64, 16384, 4, 0xf30}, + {32, 8192, 3, 0xf38}, + {16, 4096, 2, 0xf3c}, + {8, 2048, 1, 0xf3e}, + {4, 1024, 0, 0xf3f} +}; + + +static int __init intel_7505_setup (struct pci_dev *pdev) +{ + agp_bridge.masks = intel_generic_masks; + agp_bridge.num_of_masks = 1; + agp_bridge.aperture_sizes = (void *) intel_7505_sizes; + agp_bridge.size_type = U16_APER_SIZE; + agp_bridge.num_aperture_sizes = 7; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_7505_configure; + agp_bridge.fetch_size = intel_7505_fetch_size; + agp_bridge.cleanup = intel_7505_cleanup; + agp_bridge.tlb_flush = intel_7505_tlbflush; + agp_bridge.mask_memory = intel_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_3_0_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = agp_generic_create_gatt_table; + agp_bridge.free_gatt_table = agp_generic_free_gatt_table; + agp_bridge.insert_memory = agp_generic_insert_memory; + agp_bridge.remove_memory = agp_generic_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + 
agp_bridge.agp_alloc_page = agp_generic_alloc_page; + agp_bridge.agp_destroy_page = agp_generic_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 0; + return 0; +} + +struct agp_device_ids i7x05_agp_device_ids[] __initdata = +{ + { + .device_id = PCI_DEVICE_ID_INTEL_7505_0, + .chipset = INTEL_I7505, + .chipset_name = "i7505", + }, + { + .device_id = PCI_DEVICE_ID_INTEL_7205_0, + .chipset = INTEL_I7505, + .chipset_name = "i7205", + }, + { }, /* dummy final entry, always present */ +}; + +/* scan table above for supported devices */ +static int __init agp_lookup_host_bridge (struct pci_dev *pdev) +{ + int j=0; + struct agp_device_ids *devs; + + devs = i7x05_agp_device_ids; + + while (devs[j].chipset_name != NULL) { + if (pdev->device == devs[j].device_id) { + printk (KERN_INFO PFX "Detected Intel %s chipset\n", + devs[j].chipset_name); + agp_bridge.type = devs[j].chipset; + + if (devs[j].chipset_setup != NULL) + return devs[j].chipset_setup(pdev); + else + return intel_7505_setup(pdev); + } + j++; + } + + printk(KERN_ERR PFX "Unsupported Intel chipset (device id: %04x),", + pdev->device); + return -ENODEV; +} + + +static int __init agp_find_supported_device(struct pci_dev *dev) +{ + agp_bridge.dev = dev; + + if (pci_find_capability(dev, PCI_CAP_ID_AGP)==0) + return -ENODEV; + + /* probe for known chipsets */ + return agp_lookup_host_bridge(dev); +} + + +static int agp_i7x05_probe (struct pci_dev *dev, const struct pci_device_id *ent) +{ + if (agp_find_supported_device(dev) == 0) { + agp_register_driver(dev); + return 0; + } + return -ENODEV; +} + +static struct pci_device_id agp_i7x05_pci_table[] __initdata = { + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; + +MODULE_DEVICE_TABLE(pci, agp_i7x05_pci_table); + +static struct pci_driver 
agp_i7x05_pci_driver = { + .name = "agpgart-i7x05", + .id_table = agp_i7x05_pci_table, + .probe = agp_i7x05_probe, +}; + +int __init agp_i7x05_init(void) +{ + int ret_val; + + ret_val = pci_module_init(&agp_i7x05_pci_driver); + if (ret_val) + agp_bridge.type = NOT_SUPPORTED; + + return ret_val; +} + +static void __exit agp_i7x05_cleanup(void) +{ + agp_unregister_driver(); + pci_unregister_driver(&agp_i7x05_pci_driver); +} + +module_init(agp_i7x05_init); +module_exit(agp_i7x05_cleanup); + +MODULE_AUTHOR("Matthew E Tolentino "); +MODULE_LICENSE("GPL and additional rights"); + diff -urpN --exclude-from=/home/davej/.exclude bk-linus/drivers/char/agp/intel-agp.c agpgart/drivers/char/agp/intel-agp.c --- bk-linus/drivers/char/agp/intel-agp.c 2002-12-13 18:51:26.000000000 -0100 +++ agpgart/drivers/char/agp/intel-agp.c 2002-12-13 18:16:25.000000000 -0100 @@ -1473,6 +1473,11 @@ static struct pci_driver agp_intel_pci_d static int __init agp_intel_init(void) { int ret_val; + static int agp_initialised=0; + + if (agp_initialised==1) + return 0; + agp_initialised=1; ret_val = pci_module_init(&agp_intel_pci_driver); if (ret_val) diff -urpN --exclude-from=/home/davej/.exclude bk-linus/include/linux/agp_backend.h agpgart/include/linux/agp_backend.h --- bk-linus/include/linux/agp_backend.h 2002-12-13 18:52:38.000000000 -0100 +++ agpgart/include/linux/agp_backend.h 2002-12-13 18:17:57.000000000 -0100 @@ -51,6 +51,7 @@ enum chipset_type { INTEL_I850, INTEL_I860, INTEL_460GX, + INTEL_I7505, VIA_GENERIC, VIA_VP3, VIA_MVP3, diff -urpN --exclude-from=/home/davej/.exclude bk-linus/include/linux/pci_ids.h agpgart/include/linux/pci_ids.h --- bk-linus/include/linux/pci_ids.h 2002-12-13 18:52:40.000000000 -0100 +++ agpgart/include/linux/pci_ids.h 2002-12-13 18:18:07.000000000 -0100 @@ -1727,6 +1727,9 @@ #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 #define PCI_DEVICE_ID_INTEL_82092AA_1 0x1222 #define PCI_DEVICE_ID_INTEL_7116 0x1223 +#define PCI_DEVICE_ID_INTEL_7505_0 0x2550 +#define 
PCI_DEVICE_ID_INTEL_7505_1 0x2552 +#define PCI_DEVICE_ID_INTEL_7205_0 0x255d #define PCI_DEVICE_ID_INTEL_82596 0x1226 #define PCI_DEVICE_ID_INTEL_82865 0x1227 #define PCI_DEVICE_ID_INTEL_82557 0x1229