Merge branch 'sh/alphaproject' into sh-latest

commit 8b6f08eaef
Paul Mundt, 2011-01-13 18:38:28 +09:00
54 changed files with 6694 additions and 1000 deletions

diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt
@@ -457,6 +457,9 @@ ChangeLog
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
 
+2.1.30:
+	- Fix writev() (it kept writing the first segment over and over again
+	  instead of moving onto subsequent segments).
 2.1.29:
 	- Fix a deadlock when mounting read-write.
 2.1.28:
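
The writev() bug fixed in 2.1.30 is a classic iovec-iteration mistake: the write loop consumed bytes but never advanced to the next segment, so the first segment was written repeatedly. A minimal userspace model of the corrected loop (illustrative only; this is not the fs/ntfs code):

/* Example (not part of the commit): advance through iovec segments as
 * bytes drain, instead of re-reading iov[0] forever. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static size_t copy_segments(const struct iovec *iov, int iovcnt,
			    char *dst, size_t len)
{
	size_t done = 0;
	int i;

	for (i = 0; i < iovcnt && done < len; i++) {	/* advance per segment */
		size_t n = iov[i].iov_len;

		if (n > len - done)
			n = len - done;
		memcpy(dst + done, iov[i].iov_base, n);
		done += n;
	}
	return done;
}

int main(void)
{
	struct iovec iov[2] = {
		{ "hello ", 6 },
		{ "world", 5 },
	};
	char buf[16] = { 0 };

	copy_segments(iov, 2, buf, sizeof(buf) - 1);
	printf("%s\n", buf);	/* "hello world", not "hello hello " */
	return 0;
}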

diff --git a/MAINTAINERS b/MAINTAINERS
@@ -4383,11 +4383,11 @@ F:	Documentation/scsi/NinjaSCSI.txt
 F:	drivers/scsi/nsp32*
 
 NTFS FILESYSTEM
-M:	Anton Altaparmakov <aia21@cantab.net>
+M:	Anton Altaparmakov <anton@tuxera.com>
 L:	linux-ntfs-dev@lists.sourceforge.net
-W:	http://www.linux-ntfs.org/
+W:	http://www.tuxera.com/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
-S:	Maintained
+S:	Supported
 F:	Documentation/filesystems/ntfs.txt
 F:	fs/ntfs/

diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
@@ -3,6 +3,9 @@ menu "Board support"
 config SOLUTION_ENGINE
 	bool
 
+config SH_ALPHA_BOARD
+	bool
+
 config SH_SOLUTION_ENGINE
 	bool "SolutionEngine"
 	select SOLUTION_ENGINE
@@ -320,6 +323,21 @@ config SH_SH2007
 	  Compact Flash socket, two serial ports and PC-104 bus.
 	  More information at <http://sh2000.sh-linux.org>.
 
+config SH_APSH4A3A
+	bool "AP-SH4A-3A"
+	select SH_ALPHA_BOARD
+	depends on CPU_SUBTYPE_SH7785
+	help
+	  Select AP-SH4A-3A if configuring for an ALPHAPROJECT AP-SH4A-3A.
+
+config SH_APSH4AD0A
+	bool "AP-SH4AD-0A"
+	select SH_ALPHA_BOARD
+	select SYS_SUPPORTS_PCI
+	depends on CPU_SUBTYPE_SH7786
+	help
+	  Select AP-SH4AD-0A if configuring for an ALPHAPROJECT AP-SH4AD-0A.
+
 endmenu
 
 source "arch/sh/boards/mach-r2d/Kconfig"
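
Because each new board entry selects SH_ALPHA_BOARD and depends on its CPU subtype, enabling a board pulls the helper symbol in automatically. An assumed resulting .config fragment for the AP-SH4A-3A (illustrative; compare the defconfig added later in this commit):

CONFIG_CPU_SUBTYPE_SH7785=y
CONFIG_SH_ALPHA_BOARD=y
CONFIG_SH_APSH4A3A=y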

diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_SH_ESPT)		+= board-espt.o
 obj-$(CONFIG_SH_POLARIS)	+= board-polaris.o
 obj-$(CONFIG_SH_TITAN)		+= board-titan.o
 obj-$(CONFIG_SH_SH7757LCR)	+= board-sh7757lcr.o
+obj-$(CONFIG_SH_APSH4A3A)	+= board-apsh4a3a.o
+obj-$(CONFIG_SH_APSH4AD0A)	+= board-apsh4ad0a.o
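
The obj-$(CONFIG_...) kbuild idiom compiles each board file only when its symbol is y: the line expands to obj-y += board-apsh4a3a.o when CONFIG_SH_APSH4A3A=y, and to the ignored obj- list otherwise. A toy Makefile showing the expansion (a stand-in for the demo, not kbuild itself):

# Example (not part of the commit): CONFIG value hard-coded for the demo.
CONFIG_SH_APSH4A3A := y

obj-$(CONFIG_SH_APSH4A3A) += board-apsh4a3a.o

all:
	@echo "obj-y = $(obj-y)"   # prints board-apsh4a3a.o when the symbol is y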

diff --git a/arch/sh/boards/board-apsh4a3a.c b/arch/sh/boards/board-apsh4a3a.c
@@ -0,0 +1,175 @@
/*
* ALPHAPROJECT AP-SH4A-3A Support.
*
* Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
* Copyright (C) 2008 Yoshihiro Shimoda
* Copyright (C) 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mtd/physmap.h>
#include <linux/smsc911x.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/machvec.h>
#include <asm/sizes.h>
#include <asm/clock.h>
static struct mtd_partition nor_flash_partitions[] = {
{
.name = "loader",
.offset = 0x00000000,
.size = 512 * 1024,
},
{
.name = "bootenv",
.offset = MTDPART_OFS_APPEND,
.size = 512 * 1024,
},
{
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = 4 * 1024 * 1024,
},
{
.name = "data",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
static struct physmap_flash_data nor_flash_data = {
.width = 4,
.parts = nor_flash_partitions,
.nr_parts = ARRAY_SIZE(nor_flash_partitions),
};
static struct resource nor_flash_resources[] = {
[0] = {
.start = 0x00000000,
.end = 0x01000000 - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device nor_flash_device = {
.name = "physmap-flash",
.dev = {
.platform_data = &nor_flash_data,
},
.num_resources = ARRAY_SIZE(nor_flash_resources),
.resource = nor_flash_resources,
};
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
.start = 0xA4000000,
.end = 0xA4000000 + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "smsc911x-irq",
.start = evt2irq(0x200),
.end = evt2irq(0x200),
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config smsc911x_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.flags = SMSC911X_USE_16BIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
};
static struct platform_device *apsh4a3a_devices[] __initdata = {
&nor_flash_device,
&smsc911x_device,
};
static int __init apsh4a3a_devices_setup(void)
{
return platform_add_devices(apsh4a3a_devices,
ARRAY_SIZE(apsh4a3a_devices));
}
device_initcall(apsh4a3a_devices_setup);
static int apsh4a3a_clk_init(void)
{
struct clk *clk;
int ret;
clk = clk_get(NULL, "extal");
if (!clk || IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333000);
clk_put(clk);
return ret;
}
/* Initialize the board */
static void __init apsh4a3a_setup(char **cmdline_p)
{
printk(KERN_INFO "Alpha Project AP-SH4A-3A support:\n");
}
static void __init apsh4a3a_init_irq(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ7654);
}
/* Return the board specific boot mode pin configuration */
static int apsh4a3a_mode_pins(void)
{
int value = 0;
/* These are the factory default settings of SW1 and SW2.
* If you change these dip switches then you will need to
* adjust the values below as well.
*/
value &= ~MODE_PIN0; /* Clock Mode 16 */
value &= ~MODE_PIN1;
value &= ~MODE_PIN2;
value &= ~MODE_PIN3;
value |= MODE_PIN4;
value &= ~MODE_PIN5; /* 16-bit Area0 bus width */
value |= MODE_PIN6; /* Area 0 SRAM interface */
value |= MODE_PIN7;
value |= MODE_PIN8; /* Little Endian */
value |= MODE_PIN9; /* Master Mode */
value |= MODE_PIN10; /* Crystal resonator */
value |= MODE_PIN11; /* Display Unit */
value |= MODE_PIN12;
value &= ~MODE_PIN13; /* 29-bit address mode */
value |= MODE_PIN14; /* No PLL step-up */
return value;
}
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_apsh4a3a __initmv = {
.mv_name = "AP-SH4A-3A",
.mv_setup = apsh4a3a_setup,
.mv_clk_init = apsh4a3a_clk_init,
.mv_init_irq = apsh4a3a_init_irq,
.mv_mode_pins = apsh4a3a_mode_pins,
};
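
The mv_mode_pins hook lets generic SH code query the MD pin straps encoded above; in the kernel this is consulted through a helper along the lines of test_mode_pin(). A standalone model of that lookup (bit positions and the helper name are assumptions for illustration, not the kernel implementation):

/* Example (not part of the commit). */
#include <stdio.h>

#define MODE_PIN8	(1 << 8)	/* Little Endian */
#define MODE_PIN10	(1 << 10)	/* Crystal resonator */

static int board_mode_pins(void)	/* stands in for sh_mv.mv_mode_pins() */
{
	return MODE_PIN8 | MODE_PIN10;
}

static int test_mode_pin(int pin)
{
	return board_mode_pins() & pin;
}

int main(void)
{
	printf("little endian: %s\n", test_mode_pin(MODE_PIN8) ? "yes" : "no");
	printf("crystal resonator: %s\n", test_mode_pin(MODE_PIN10) ? "yes" : "no");
	return 0;
}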

diff --git a/arch/sh/boards/board-apsh4ad0a.c b/arch/sh/boards/board-apsh4ad0a.c
@@ -0,0 +1,125 @@
/*
* ALPHAPROJECT AP-SH4AD-0A Support.
*
* Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
* Copyright (C) 2010 Matt Fleming
* Copyright (C) 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/smsc911x.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/machvec.h>
#include <asm/sizes.h>
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
.start = 0xA4000000,
.end = 0xA4000000 + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "smsc911x-irq",
.start = evt2irq(0x200),
.end = evt2irq(0x200),
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config smsc911x_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.flags = SMSC911X_USE_16BIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
};
static struct platform_device *apsh4ad0a_devices[] __initdata = {
&smsc911x_device,
};
static int __init apsh4ad0a_devices_setup(void)
{
return platform_add_devices(apsh4ad0a_devices,
ARRAY_SIZE(apsh4ad0a_devices));
}
device_initcall(apsh4ad0a_devices_setup);
static int apsh4ad0a_mode_pins(void)
{
int value = 0;
/* These are the factory default settings of SW1 and SW2.
* If you change these dip switches then you will need to
* adjust the values below as well.
*/
value |= MODE_PIN0; /* Clock Mode 3 */
value |= MODE_PIN1;
value &= ~MODE_PIN2;
value &= ~MODE_PIN3;
value &= ~MODE_PIN4; /* 16-bit Area0 bus width */
value |= MODE_PIN5;
value |= MODE_PIN6;
value |= MODE_PIN7; /* Normal mode */
value |= MODE_PIN8; /* Little Endian */
value |= MODE_PIN9; /* Crystal resonator */
value &= ~MODE_PIN10; /* 29-bit address mode */
value &= ~MODE_PIN11; /* PCI-E Root port */
value &= ~MODE_PIN12; /* 4 lane + 1 lane */
value |= MODE_PIN13; /* AUD Enable */
value &= ~MODE_PIN14; /* Normal Operation */
return value;
}
static int apsh4ad0a_clk_init(void)
{
struct clk *clk;
int ret;
clk = clk_get(NULL, "extal");
if (!clk || IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333000);
clk_put(clk);
return ret;
}
/* Initialize the board */
static void __init apsh4ad0a_setup(char **cmdline_p)
{
pr_info("Alpha Project AP-SH4AD-0A support:\n");
}
static void __init apsh4ad0a_init_irq(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ3210);
}
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_apsh4ad0a __initmv = {
.mv_name = "AP-SH4AD-0A",
.mv_setup = apsh4ad0a_setup,
.mv_mode_pins = apsh4ad0a_mode_pins,
.mv_clk_init = apsh4ad0a_clk_init,
.mv_init_irq = apsh4ad0a_init_irq,
};
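
Both board files pin the external oscillator at 33.333 MHz through the common clk API. Note that clk_get() reports failure with ERR_PTR() rather than NULL, so the !clk test above is belt-and-braces; the canonical shape of the pattern is sketched below (assumes the kernel clk headers; not a drop-in replacement for the code above):

/* Example (not part of the commit): clk_get() returns an ERR_PTR() on
 * failure, so IS_ERR() is the idiomatic check. */
static int board_clk_init(void)
{
	struct clk *clk;
	int ret;

	clk = clk_get(NULL, "extal");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_rate(clk, 33333000);
	clk_put(clk);

	return ret;
}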

diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig
@@ -0,0 +1,102 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7785=y
CONFIG_MEMORY_START=0x0C000000
CONFIG_FLATMEM_MANUAL=y
CONFIG_SH_STORE_QUEUES=y
CONFIG_SH_APSH4A3A=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_KEXEC=y
CONFIG_PREEMPT=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_SMSC911X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_HW_RANDOM=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_SH7785FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_LOGO=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_CIFS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_FTRACE is not set
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
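
A defconfig like this is consumed by kbuild's *_defconfig target. A typical cross-build looks like the following (the sh4-linux- toolchain prefix is an assumption; adjust for your environment):

make ARCH=sh CROSS_COMPILE=sh4-linux- apsh4a3a_defconfig
make ARCH=sh CROSS_COMPILE=sh4-linux-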

diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
@@ -0,0 +1,133 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_RCU_TRACE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CGROUPS=y
CONFIG_CGROUP_NS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEM_RES_CTLR=y
CONFIG_BLK_CGROUP=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_CPU_SUBTYPE_SH7786=y
CONFIG_MEMORY_SIZE=0x10000000
CONFIG_HUGETLB_PAGE_SIZE_1MB=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_SH_STORE_QUEUES=y
CONFIG_SH_APSH4AD0A=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m
CONFIG_CPU_FREQ_GOV_ONDEMAND=m
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_SH_CPU_FREQ=y
CONFIG_KEXEC=y
CONFIG_SECCOMP=y
CONFIG_PREEMPT=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
CONFIG_PM=y
CONFIG_PM_DEBUG=y
CONFIG_PM_VERBOSE=y
CONFIG_PM_RUNTIME=y
CONFIG_CPU_IDLE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CFI=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
CONFIG_MDIO_BITBANG=y
CONFIG_NET_ETHERNET=y
CONFIG_SMSC911X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_SH7785FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_LOGO=y
CONFIG_USB=y
CONFIG_USB_DEBUG=y
CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_CIFS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_932=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_VM=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_DWARF_UNWINDER=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set

diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
@@ -9,6 +9,7 @@ SE			SH_SOLUTION_ENGINE
 HIGHLANDER		SH_HIGHLANDER
 RTS7751R2D		SH_RTS7751R2D
 RSK			SH_RSK
+ALPHA_BOARD		SH_ALPHA_BOARD
 
 #
 # List of companion chips / MFDs.
@@ -61,3 +62,5 @@ ESPT			SH_ESPT
 POLARIS			SH_POLARIS
 KFR2R09			SH_KFR2R09
 ECOVEC			SH_ECOVEC
+APSH4A3A		SH_APSH4A3A
+APSH4AD0A		SH_APSH4AD0A

diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
@@ -464,6 +464,7 @@ config XEN_BLKDEV_FRONTEND
 	tristate "Xen virtual block device support"
 	depends on XEN
 	default y
+	select XEN_XENBUS_FRONTEND
 	help
 	  This driver implements the front-end of the Xen virtual
 	  block device driver. It communicates with a back-end driver

diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
@@ -94,6 +94,8 @@
 #define G4x_GMCH_SIZE_VT_1_5M	(0xa << 8)
 #define G4x_GMCH_SIZE_VT_2M	(0xc << 8)
 
+#define GFX_FLSH_CNTL		0x2170 /* 915+ */
+
 #define I810_DRAM_CTL		0x3000
 #define I810_DRAM_ROW_0		0x00000001
 #define I810_DRAM_ROW_0_SDRAM	0x00000001

diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
@@ -688,14 +688,14 @@ static int intel_gtt_init(void)
 
 	intel_private.base.stolen_size = intel_gtt_stolen_size();
 
+	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+
 	ret = intel_gtt_setup_scratch_page();
 	if (ret != 0) {
 		intel_gtt_cleanup();
 		return ret;
 	}
 
-	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
-
 	return 0;
 }
 
@@ -814,6 +814,12 @@ static bool intel_enable_gtt(void)
 		}
 	}
 
+	/* On the resume path we may be adjusting the PGTBL value, so
+	 * be paranoid and flush all chipset write buffers...
+	 */
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
 	reg = intel_private.registers+I810_PGETBL_CTL;
 	writel(intel_private.PGETBL_save, reg);
 	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
@@ -823,6 +829,9 @@ static bool intel_enable_gtt(void)
 		return false;
 	}
 
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
 	return true;
 }
 
@@ -991,14 +1000,14 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
 	if (mem->page_count == 0)
 		return 0;
 
+	intel_gtt_clear_range(pg_start, mem->page_count);
+
 	if (intel_private.base.needs_dmar) {
 		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
 		mem->sg_list = NULL;
 		mem->num_sg = 0;
 	}
 
-	intel_gtt_clear_range(pg_start, mem->page_count);
-
 	return 0;
 }
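
The last hunk reorders teardown so the GTT stops referencing the pages before their DMA mapping is released. A self-contained toy model of that ordering constraint (all names invented for illustration):

/* Example (not part of the commit). */
#include <stdio.h>

static void gtt_clear_range(int start, int count)
{
	printf("GTT entries %d..%d redirected to the scratch page\n",
	       start, start + count - 1);
}

static void dma_unmap_pages(void)
{
	printf("DMA scatterlist mapping released\n");
}

int main(void)
{
	gtt_clear_range(0, 4);	/* step 1: the GPU can no longer reach the pages */
	dma_unmap_pages();	/* step 2: only now is unmapping them safe */
	return 0;
}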

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -106,10 +106,19 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 	}
 }
 
+static const char *agp_type_str(int type)
+{
+	switch (type) {
+	case 0: return " uncached";
+	case 1: return " snooped";
+	default: return "";
+	}
+}
+
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
+	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
@@ -118,6 +127,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.write_domain,
 		   obj->last_rendering_seqno,
 		   obj->last_fenced_seqno,
+		   agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -276,6 +286,37 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	return 0;
 }
 
+static int i915_gem_gtt_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		seq_printf(m, "   ");
+		describe_obj(m, obj);
+		seq_printf(m, "\n");
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
+		count++;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+
+	return 0;
+}
+
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
@@ -456,8 +497,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	for (i = 0; i < I915_NUM_RINGS; i++)
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		if (IS_GEN6(dev)) {
+			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
+				   dev_priv->ring[i].name,
+				   I915_READ_IMR(&dev_priv->ring[i]));
+		}
 		i915_ring_seqno_info(m, &dev_priv->ring[i]);
+	}
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -656,7 +703,7 @@ static void print_error_buffers(struct seq_file *m,
 	seq_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, "  %08x %8zd %04x %04x %08x%s%s%s%s%s",
+		seq_printf(m, "  %08x %8zd %04x %04x %08x%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
@@ -666,7 +713,8 @@ static void print_error_buffers(struct seq_file *m,
 			   tiling_flag(err->tiling),
 			   dirty_flag(err->dirty),
 			   purgeable_flag(err->purgeable),
-			   ring_str(err->ring));
+			   ring_str(err->ring),
+			   agp_type_str(err->agp_type));
 
 		if (err->name)
 			seq_printf(m, " (name: %d)", err->name);
@@ -744,7 +792,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		if (error->batchbuffer[i]) {
 			struct drm_i915_error_object *obj = error->batchbuffer[i];
 
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
 			offset = 0;
 			for (page = 0; page < obj->page_count; page++) {
 				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
@@ -890,7 +940,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
-	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+	u32 rstdbyctl = I915_READ(RSTDBYCTL);
 	u16 crstandvid = I915_READ16(CRSTANDVID);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -913,6 +963,30 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
 	seq_printf(m, "Render standby enabled: %s\n",
 		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+	seq_printf(m, "Current RS state: ");
+	switch (rstdbyctl & RSX_STATUS_MASK) {
+	case RSX_STATUS_ON:
+		seq_printf(m, "on\n");
+		break;
+	case RSX_STATUS_RC1:
+		seq_printf(m, "RC1\n");
+		break;
+	case RSX_STATUS_RC1E:
+		seq_printf(m, "RC1E\n");
+		break;
+	case RSX_STATUS_RS1:
+		seq_printf(m, "RS1\n");
+		break;
+	case RSX_STATUS_RS2:
+		seq_printf(m, "RS2 (RC6)\n");
+		break;
+	case RSX_STATUS_RS3:
+		seq_printf(m, "RC3 (RC6+)\n");
+		break;
+	default:
+		seq_printf(m, "unknown\n");
+		break;
+	}
 
 	return 0;
 }
@@ -1187,6 +1261,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
+	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
@@ -1962,13 +1962,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	/* enable GEM by default */
 	dev_priv->has_gem = 1;
 
-	if (dev_priv->has_gem == 0 &&
-	    drm_core_check_feature(dev, DRIVER_MODESET)) {
-		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
-		ret = -ENODEV;
-		goto out_workqueue_free;
-	}
-
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
@@ -2055,7 +2048,6 @@ out_gem_unload:
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 
-out_workqueue_free:
 	destroy_workqueue(dev_priv->wq);
 out_iomapfree:
 	io_mapping_free(dev_priv->mm.gtt_mapping);

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,9 @@ module_param_named(powersave, i915_powersave, int, 0600);
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
+bool i915_try_reset = true;
+module_param_named(reset, i915_try_reset, bool, 0600);
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
@@ -352,6 +355,9 @@ static int i915_drm_thaw(struct drm_device *dev)
 
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
+
+		if (dev_priv->renderctx && dev_priv->pwrctx)
+			ironlake_enable_rc6(dev);
 	}
 
 	intel_opregion_init(dev);
@@ -475,6 +481,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
 	bool need_display = true;
 	int ret;
 
+	if (!i915_try_reset)
+		return 0;
+
 	if (!mutex_trylock(&dev->struct_mutex))
 		return -EBUSY;
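
The new reset parameter provides a runtime escape hatch when debugging GPU hangs, and because it is registered with mode 0600 it is also writable through sysfs. Example usage (standard modprobe/sysfs mechanics):

modprobe i915 reset=0                        # disable GPU reset at load time
echo 0 > /sys/module/i915/parameters/reset   # or toggle it after loading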

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,20 +172,21 @@ struct drm_i915_error_state {
 		int page_count;
 		u32 gtt_offset;
 		u32 *pages[0];
-	} *ringbuffer, *batchbuffer[2];
+	} *ringbuffer, *batchbuffer[I915_NUM_RINGS];
 	struct drm_i915_error_buffer {
-		size_t size;
+		u32 size;
 		u32 name;
 		u32 seqno;
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
-		u32 fence_reg;
+		s32 fence_reg:5;
 		s32 pinned:2;
 		u32 tiling:2;
 		u32 dirty:1;
 		u32 purgeable:1;
 		u32 ring:4;
+		u32 agp_type:1;
 	} *active_bo, *pinned_bo;
 	u32 active_bo_count, pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
@@ -332,6 +333,7 @@ typedef struct drm_i915_private {
 
 	/* LVDS info */
 	int backlight_level;  /* restore backlight to this value */
+	bool backlight_enabled;
 	struct drm_display_mode *panel_fixed_mode;
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -794,6 +796,7 @@ struct drm_i915_gem_object {
 	 */
 	struct hlist_node exec_node;
 	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
 
 	/**
 	 * Current offset of the object in GTT space.
@@ -1006,12 +1009,6 @@ extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
 extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
-extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
-					 u32 mask);
-extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
-					  u32 mask);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1091,10 +1088,10 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_flush_ring(struct drm_device *dev,
-			 struct intel_ring_buffer *ring,
-			 uint32_t invalidate_domains,
-			 uint32_t flush_domains);
+int __must_check i915_gem_flush_ring(struct drm_device *dev,
+				     struct intel_ring_buffer *ring,
+				     uint32_t invalidate_domains,
+				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1265,6 +1262,7 @@ extern void intel_disable_fbc(struct drm_device *dev);
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void ironlake_enable_rc6(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,18 +35,18 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
-					     bool write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
-						      uint64_t offset,
-						      uint64_t size);
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+							   bool write);
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+								   uint64_t offset,
+								   uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
-				       unsigned alignment,
-				       bool map_and_fenceable);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+						    unsigned alignment,
+						    bool map_and_fenceable);
 static void i915_gem_clear_fence_reg(struct drm_device *dev,
 				     struct drm_i915_fence_reg *reg);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
@@ -1935,6 +1935,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv;
 	struct drm_device *dev;
+	bool idle;
+	int i;
 
 	dev_priv = container_of(work, drm_i915_private_t,
 				mm.retire_work.work);
@@ -1948,11 +1950,31 @@ i915_gem_retire_work_handler(struct work_struct *work)
 
 	i915_gem_retire_requests(dev);
 
-	if (!dev_priv->mm.suspended &&
-	    (!list_empty(&dev_priv->ring[RCS].request_list) ||
-	     !list_empty(&dev_priv->ring[VCS].request_list) ||
-	     !list_empty(&dev_priv->ring[BCS].request_list)))
+	/* Send a periodic flush down the ring so we don't hold onto GEM
+	 * objects indefinitely.
+	 */
+	idle = true;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+		if (!list_empty(&ring->gpu_write_list)) {
+			struct drm_i915_gem_request *request;
+			int ret;
+
+			ret = i915_gem_flush_ring(dev, ring, 0,
+						  I915_GEM_GPU_DOMAINS);
+			request = kzalloc(sizeof(*request), GFP_KERNEL);
+			if (ret || request == NULL ||
+			    i915_add_request(dev, NULL, request, ring))
+				kfree(request);
+		}
+
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (!dev_priv->mm.suspended && !idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -2142,25 +2164,37 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
-void
+int
 i915_gem_flush_ring(struct drm_device *dev,
 		    struct intel_ring_buffer *ring,
 		    uint32_t invalidate_domains,
 		    uint32_t flush_domains)
 {
-	ring->flush(ring, invalidate_domains, flush_domains);
+	int ret;
+
+	ret = ring->flush(ring, invalidate_domains, flush_domains);
+	if (ret)
+		return ret;
+
 	i915_gem_process_flushing_list(dev, flush_domains, ring);
+	return 0;
 }
 
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	int ret;
+
 	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
-	if (!list_empty(&ring->gpu_write_list))
-		i915_gem_flush_ring(dev, ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (!list_empty(&ring->gpu_write_list)) {
+		ret = i915_gem_flush_ring(dev, ring,
+					  I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+	}
+
 	return i915_wait_request(dev,
 				 i915_gem_next_request_seqno(dev, ring),
 				 ring);
@@ -2370,10 +2404,13 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 	int ret;
 
 	if (obj->fenced_gpu_access) {
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-			i915_gem_flush_ring(obj->base.dev,
-					    obj->last_fenced_ring,
-					    0, obj->base.write_domain);
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(obj->base.dev,
+						  obj->last_fenced_ring,
+						  0, obj->base.write_domain);
+			if (ret)
+				return ret;
+		}
 
 		obj->fenced_gpu_access = false;
 	}
@@ -2393,6 +2430,12 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 		obj->last_fenced_ring = NULL;
 	}
 
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
+		mb();
+
 	return 0;
 }
 
@@ -2523,9 +2566,12 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 				return ret;
 		} else if (obj->tiling_changed) {
 			if (obj->fenced_gpu_access) {
-				if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-					i915_gem_flush_ring(obj->base.dev, obj->ring,
-							    0, obj->base.write_domain);
+				if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+					ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+								  0, obj->base.write_domain);
+					if (ret)
+						return ret;
+				}
 
 				obj->fenced_gpu_access = false;
 			}
@@ -2736,10 +2782,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		obj->gtt_space = NULL;
 
 		if (ret == -ENOMEM) {
-			/* first try to clear up some space from the GTT */
-			ret = i915_gem_evict_something(dev, size,
-						       alignment,
-						       map_and_fenceable);
+			/* first try to reclaim some memory by clearing the GTT */
+			ret = i915_gem_evict_everything(dev, false);
 			if (ret) {
 				/* now try to shrink everyone else */
 				if (gfpmask) {
@@ -2747,7 +2791,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 					goto search_free;
 				}
 
-				return ret;
+				return -ENOMEM;
 			}
 
 			goto search_free;
@@ -2762,9 +2806,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		drm_mm_put_block(obj->gtt_space);
 		obj->gtt_space = NULL;
 
-		ret = i915_gem_evict_something(dev, size,
-					       alignment, map_and_fenceable);
-		if (ret)
+		if (i915_gem_evict_everything(dev, false))
 			return ret;
 
 		goto search_free;
@@ -2811,17 +2853,16 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 
 	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return;
+		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
-	i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
-	BUG_ON(obj->base.write_domain);
+	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2833,10 +2874,16 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
 		return;
 
 	/* No actual flushing is required for the GTT write domain.  Writes
 	 * to it immediately go to main memory as far as we know, so there's
 	 * no chipset flush.  It also doesn't land in render cache.
+	 *
+	 * However, we do have to enforce the order so that all writes through
+	 * the GTT land before any writes to the device, such as updates to
+	 * the GATT itself.
 	 */
+	wmb();
+
 	i915_gem_release_mmap(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -2882,7 +2929,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	if (obj->pending_gpu_write || write) {
 		ret = i915_gem_object_wait_rendering(obj, true);
 		if (ret)
@@ -2927,7 +2977,10 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	/* Currently, we are always called from an non-interruptible context. */
 	if (pipelined != obj->ring) {
@@ -2952,12 +3005,17 @@ int
 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 			  bool interruptible)
 {
+	int ret;
+
 	if (!obj->active)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-		i915_gem_flush_ring(obj->base.dev, obj->ring,
-				    0, obj->base.write_domain);
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+					  0, obj->base.write_domain);
+		if (ret)
+			return ret;
+	}
 
 	return i915_gem_object_wait_rendering(obj, interruptible);
 }
@@ -2974,7 +3032,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3069,7 +3130,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 	if (offset == 0 && size == obj->base.size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3362,8 +3426,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		 * flush earlier is beneficial.
 		 */
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			i915_gem_flush_ring(dev, obj->ring,
-					    0, obj->base.write_domain);
+			ret = i915_gem_flush_ring(dev, obj->ring,
+						  0, obj->base.write_domain);
 		} else if (obj->ring->outstanding_lazy_request ==
 			   obj->last_rendering_seqno) {
 			struct drm_i915_gem_request *request;

diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -127,9 +127,15 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 	}
 
 	/* Nothing found, clean up and bail out! */
-	list_for_each_entry(obj, &unwind_list, exec_list) {
+	while (!list_empty(&unwind_list)) {
+		obj = list_first_entry(&unwind_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
+
 		ret = drm_mm_scan_remove_block(obj->gtt_space);
 		BUG_ON(ret);
+
+		list_del_init(&obj->exec_list);
 		drm_gem_object_unreference(&obj->base);
 	}
 
@@ -162,6 +168,7 @@ found:
 				       exec_list);
 		if (ret == 0)
 			ret = i915_gem_object_unbind(obj);
+
 		list_del_init(&obj->exec_list);
 		drm_gem_object_unreference(&obj->base);
 	}

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -268,7 +268,6 @@ eb_destroy(struct eb_objects *eb)
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_objects *eb,
-				   struct drm_i915_gem_exec_object2 *entry,
 				   struct drm_i915_gem_relocation_entry *reloc)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -411,10 +410,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 static int
 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-				    struct eb_objects *eb,
-				    struct drm_i915_gem_exec_object2 *entry)
+				    struct eb_objects *eb)
 {
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	int i, ret;
 
 	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
@@ -426,7 +425,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 			       sizeof(reloc)))
 			return -EFAULT;
 
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc);
+		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
 		if (ret)
 			return ret;
 
@@ -442,13 +441,13 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 static int
 i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 					 struct eb_objects *eb,
-					 struct drm_i915_gem_exec_object2 *entry,
 					 struct drm_i915_gem_relocation_entry *relocs)
 {
+	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	int i, ret;
 
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]);
+		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
 		if (ret)
 			return ret;
 	}
@@ -459,8 +458,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 static int
 i915_gem_execbuffer_relocate(struct drm_device *dev,
 			     struct eb_objects *eb,
-			     struct list_head *objects,
-			     struct drm_i915_gem_exec_object2 *exec)
+			     struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
 	int ret;
@@ -468,7 +466,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 	list_for_each_entry(obj, objects, exec_list) {
 		obj->base.pending_read_domains = 0;
 		obj->base.pending_write_domain = 0;
-		ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++);
+		ret = i915_gem_execbuffer_relocate_object(obj, eb);
 		if (ret)
 			return ret;
 	}
@@ -479,13 +477,36 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct drm_file *file,
-			    struct list_head *objects,
-			    struct drm_i915_gem_exec_object2 *exec)
+			    struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
-	struct drm_i915_gem_exec_object2 *entry;
 	int ret, retry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	struct list_head ordered_objects;
+
+	INIT_LIST_HEAD(&ordered_objects);
+	while (!list_empty(objects)) {
+		struct drm_i915_gem_exec_object2 *entry;
+		bool need_fence, need_mappable;
+
+		obj = list_first_entry(objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		entry = obj->exec_entry;
+
+		need_fence =
+			has_fenced_gpu_access &&
+			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+			obj->tiling_mode != I915_TILING_NONE;
+		need_mappable =
+			entry->relocation_count ? true : need_fence;
+
+		if (need_mappable)
+			list_move(&obj->exec_list, &ordered_objects);
+		else
+			list_move_tail(&obj->exec_list, &ordered_objects);
+	}
+	list_splice(&ordered_objects, objects);
 
 	/* Attempt to pin all of the buffers into the GTT.
 	 * This is done in 3 phases:
@@ -504,14 +525,11 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
-		entry = exec;
 		list_for_each_entry(obj, objects, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 			bool need_fence, need_mappable;
 
-			if (!obj->gtt_space) {
-				entry++;
+			if (!obj->gtt_space)
 				continue;
-			}
 
 			need_fence =
 				has_fenced_gpu_access &&
@@ -534,8 +552,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		}
 
 		/* Bind fresh objects */
-		entry = exec;
 		list_for_each_entry(obj, objects, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 			bool need_fence;
 
 			need_fence =
@@ -570,7 +588,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			}
 
 			entry->offset = obj->gtt_offset;
-			entry++;
 		}
 
 		/* Decrement pin count for bound objects */
@@ -622,7 +639,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	int i, total, ret;
 
 	/* We may process another execbuffer during the unlock... */
-	while (list_empty(objects)) {
+	while (!list_empty(objects)) {
 		obj = list_first_entry(objects,
 				       struct drm_i915_gem_object,
 				       exec_list);
@@ -665,7 +682,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	}
 
 	/* reacquire the objects */
-	INIT_LIST_HEAD(objects);
 	eb_reset(eb);
 	for (i = 0; i < count; i++) {
 		struct drm_i915_gem_object *obj;
@@ -681,10 +697,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
 		list_add_tail(&obj->exec_list, objects);
 		obj->exec_handle = exec[i].handle;
+		obj->exec_entry = &exec[i];
 		eb_add_object(eb, obj);
 	}
 
-	ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
+	ret = i915_gem_execbuffer_reserve(ring, file, objects);
 	if (ret)
 		goto err;
 
@@ -693,7 +710,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		obj->base.pending_read_domains = 0;
 		obj->base.pending_write_domain = 0;
 		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-							       exec,
 							       reloc + total);
 		if (ret)
 			goto err;
@@ -713,25 +729,34 @@ err:
 	return ret;
 }
 
-static void
+static int
 i915_gem_execbuffer_flush(struct drm_device *dev,
 			  uint32_t invalidate_domains,
 			  uint32_t flush_domains,
 			  uint32_t flush_rings)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
 
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		intel_gtt_chipset_flush();
 
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
 		for (i = 0; i < I915_NUM_RINGS; i++)
-			if (flush_rings & (1 << i))
-				i915_gem_flush_ring(dev, &dev_priv->ring[i],
-						    invalidate_domains,
-						    flush_domains);
+			if (flush_rings & (1 << i)) {
+				ret = i915_gem_flush_ring(dev,
+							  &dev_priv->ring[i],
+							  invalidate_domains,
+							  flush_domains);
+				if (ret)
+					return ret;
+			}
 	}
+
+	return 0;
 }
 
 static int
@@ -795,10 +820,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 			  cd.invalidate_domains,
 			  cd.flush_domains);
 #endif
-		i915_gem_execbuffer_flush(ring->dev,
-					  cd.invalidate_domains,
-					  cd.flush_domains,
-					  cd.flush_rings);
+		ret = i915_gem_execbuffer_flush(ring->dev,
+						cd.invalidate_domains,
+						cd.flush_domains,
+						cd.flush_rings);
+		if (ret)
+			return ret;
 	}
 
 	list_for_each_entry(obj, objects, exec_list) {
@@ -921,7 +948,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 				    struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_request *request;
-	u32 flush_domains;
+	u32 invalidate;
 
 	/*
 	 * Ensure that the commands in the batch buffer are
@@ -929,11 +956,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 	 *
 	 * The sampler always gets flushed on i965 (sigh).
 	 */
-	flush_domains = 0;
+	invalidate = I915_GEM_DOMAIN_COMMAND;
 	if (INTEL_INFO(dev)->gen >= 4)
-		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-
-	ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
+		invalidate |= I915_GEM_DOMAIN_SAMPLER;
+	if (ring->flush(ring, invalidate, 0)) {
+		i915_gem_next_request_seqno(dev, ring);
+		return;
+	}
 
 	/* Add a breadcrumb for the completion of the batch buffer */
 	request = kzalloc(sizeof(*request), GFP_KERNEL);
@@ -1098,16 +1127,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 		list_add_tail(&obj->exec_list, &objects);
 		obj->exec_handle = exec[i].handle;
+		obj->exec_entry = &exec[i];
 		eb_add_object(eb, obj);
 	}
 
+	/* take note of the batch buffer before we might reorder the lists */
+	batch_obj = list_entry(objects.prev,
+			       struct drm_i915_gem_object,
+			       exec_list);
+
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
+	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
 	if (ret)
 		goto err;
 
 	/* The objects are in their final locations, apply the relocations. */
-	ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec);
+	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
@@ -1121,9 +1156,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Set the pending read domains for the batch buffer to COMMAND */
-	batch_obj = list_entry(objects.prev,
-			       struct drm_i915_gem_object,
-			       exec_list);
 	if (batch_obj->base.pending_write_domain) {
 		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
 		ret = -EINVAL;
@@ -1340,4 +1372,3 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 
 	drm_free_large(exec2_list);
 	return ret;
 }

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -85,15 +85,11 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->mm.gtt->needs_dmar) {
-		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
-		obj->sg_list = NULL;
-		obj->num_sg = 0;
-	}
-
 	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
 			      obj->base.size >> PAGE_SHIFT);
+
+	if (obj->sg_list) {
+		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
+		obj->sg_list = NULL;
+	}
 }

@@ -64,26 +64,6 @@
 #define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

-void
-ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask & mask) != 0) {
-		dev_priv->gt_irq_mask &= ~mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
-}
-
-void
-ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask & mask) != mask) {
-		dev_priv->gt_irq_mask |= mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
-}
-
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -105,26 +85,6 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 	}
 }

-void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask & mask) != 0) {
-		dev_priv->irq_mask &= ~mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-}
-
-void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask & mask) != mask) {
-		dev_priv->irq_mask |= mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-}
-
 static inline u32
 i915_pipestat(int pipe)
 {
@@ -389,9 +349,12 @@ static void notify_ring(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 seqno = ring->get_seqno(ring);
-	ring->irq_seqno = seqno;
+
 	trace_i915_gem_request_complete(dev, seqno);
+
+	ring->irq_seqno = seqno;
 	wake_up_all(&ring->irq_queue);
+
 	dev_priv->hangcheck_count = 0;
 	mod_timer(&dev_priv->hangcheck_timer,
 		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -435,6 +398,50 @@ static void gen6_pm_irq_handler(struct drm_device *dev)
 	I915_WRITE(GEN6_PMIIR, pm_iir);
 }

+static void pch_irq_handler(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 pch_iir;
+
+	pch_iir = I915_READ(SDEIIR);
+
+	if (pch_iir & SDE_AUDIO_POWER_MASK)
+		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
+				 SDE_AUDIO_POWER_SHIFT);
+
+	if (pch_iir & SDE_GMBUS)
+		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_HDCP_MASK)
+		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_TRANS_MASK)
+		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+	if (pch_iir & SDE_POISON)
+		DRM_ERROR("PCH poison interrupt\n");
+
+	if (pch_iir & SDE_FDI_MASK) {
+		u32 fdia, fdib;
+
+		fdia = I915_READ(FDI_RXA_IIR);
+		fdib = I915_READ(FDI_RXB_IIR);
+		DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
+	}
+
+	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
+	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
+}
+
 static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -502,8 +509,11 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 		drm_handle_vblank(dev, 1);

 	/* check event from PCH */
-	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
-		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+	if (de_iir & DE_PCH_EVENT) {
+		if (pch_iir & hotplug_mask)
+			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+		pch_irq_handler(dev);
+	}

 	if (de_iir & DE_PCU_EVENT) {
 		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -556,10 +566,9 @@ static void i915_error_work_func(struct work_struct *work)
 #ifdef CONFIG_DEBUG_FS

 static struct drm_i915_error_object *
-i915_error_object_create(struct drm_device *dev,
+i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_object *dst;
 	int page, page_count;
 	u32 reloc_offset;
@@ -632,52 +641,6 @@ i915_error_state_free(struct drm_device *dev,
 	kfree(error);
 }

-static u32
-i915_get_bbaddr(struct drm_device *dev, u32 *ring)
-{
-	u32 cmd;
-
-	if (IS_I830(dev) || IS_845G(dev))
-		cmd = MI_BATCH_BUFFER;
-	else if (INTEL_INFO(dev)->gen >= 4)
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
-		       MI_BATCH_NON_SECURE_I965);
-	else
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6));
-
-	return ring[0] == cmd ? ring[1] : 0;
-}
-
-static u32
-i915_ringbuffer_last_batch(struct drm_device *dev,
-			   struct intel_ring_buffer *ring)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 head, bbaddr;
-	u32 *val;
-
-	/* Locate the current position in the ringbuffer and walk back
-	 * to find the most recently dispatched batch buffer.
-	 */
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
-
-	val = (u32 *)(ring->virtual_start + head);
-	while (--val >= (u32 *)ring->virtual_start) {
-		bbaddr = i915_get_bbaddr(dev, val);
-		if (bbaddr)
-			return bbaddr;
-	}
-
-	val = (u32 *)(ring->virtual_start + ring->size);
-	while (--val >= (u32 *)ring->virtual_start) {
-		bbaddr = i915_get_bbaddr(dev, val);
-		if (bbaddr)
-			return bbaddr;
-	}
-
-	return 0;
-}
-
 static u32 capture_bo_list(struct drm_i915_error_buffer *err,
			    int count,
			    struct list_head *head)
@@ -702,6 +665,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
		err->dirty = obj->dirty;
		err->purgeable = obj->madv != I915_MADV_WILLNEED;
		err->ring = obj->ring ? obj->ring->id : 0;
+		err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;

		if (++i == count)
			break;
@@ -741,6 +705,36 @@ static void i915_gem_record_fences(struct drm_device *dev,
 	}
 }

+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+			     struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_object *obj;
+	u32 seqno;
+
+	if (!ring->get_seqno)
+		return NULL;
+
+	seqno = ring->get_seqno(ring);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->ring != ring)
+			continue;
+
+		if (!i915_seqno_passed(obj->last_rendering_seqno, seqno))
+			continue;
+
+		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+			continue;
+
+		/* We need to copy these to an anonymous buffer as the simplest
+		 * method to avoid being overwritten by userspace.
+		 */
+		return i915_error_object_create(dev_priv, obj);
+	}
+
+	return NULL;
+}
+
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
@@ -755,10 +749,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_i915_error_state *error;
-	struct drm_i915_gem_object *batchbuffer[2];
 	unsigned long flags;
-	u32 bbaddr;
-	int count;
+	int i;

 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	error = dev_priv->first_error;
@@ -817,83 +809,30 @@ static void i915_capture_error_state(struct drm_device *dev)
 	}
 	i915_gem_record_fences(dev, error);

-	bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]);
-
-	/* Grab the current batchbuffer, most likely to have crashed. */
-	batchbuffer[0] = NULL;
-	batchbuffer[1] = NULL;
-	count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (batchbuffer[0] == NULL &&
-		    bbaddr >= obj->gtt_offset &&
-		    bbaddr < obj->gtt_offset + obj->base.size)
-			batchbuffer[0] = obj;
-
-		if (batchbuffer[1] == NULL &&
-		    error->acthd >= obj->gtt_offset &&
-		    error->acthd < obj->gtt_offset + obj->base.size)
-			batchbuffer[1] = obj;
-
-		count++;
-	}
-	/* Scan the other lists for completeness for those bizarre errors. */
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj->gtt_offset &&
-			    bbaddr < obj->gtt_offset + obj->base.size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj->gtt_offset &&
-			    error->acthd < obj->gtt_offset + obj->base.size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj->gtt_offset &&
-			    bbaddr < obj->gtt_offset + obj->base.size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj->gtt_offset &&
-			    error->acthd < obj->gtt_offset + obj->base.size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-
-	/* We need to copy these to an anonymous buffer as the simplest
-	 * method to avoid being overwritten by userspace.
-	 */
-	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
-	if (batchbuffer[1] != batchbuffer[0])
-		error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
-	else
-		error->batchbuffer[1] = NULL;
+	/* Record the active batchbuffers */
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		error->batchbuffer[i] =
+			i915_error_first_batchbuffer(dev_priv,
+						     &dev_priv->ring[i]);

 	/* Record the ringbuffer */
-	error->ringbuffer = i915_error_object_create(dev,
+	error->ringbuffer = i915_error_object_create(dev_priv,
						     dev_priv->ring[RCS].obj);

 	/* Record buffers on the active and pinned lists. */
 	error->active_bo = NULL;
 	error->pinned_bo = NULL;

-	error->active_bo_count = count;
+	i = 0;
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+		i++;
+	error->active_bo_count = i;
 	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
-		count++;
-	error->pinned_bo_count = count - error->active_bo_count;
+		i++;
+	error->pinned_bo_count = i - error->active_bo_count;

-	if (count) {
-		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+	if (i) {
+		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
@@ -1673,11 +1612,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

-	if (IS_GEN6(dev)) {
-		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
-		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
-		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
-	}
-
 	if (IS_GEN6(dev))
 		render_irqs =
@@ -1698,6 +1632,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	} else {
 		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+		hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
+		I915_WRITE(FDI_RXA_IMR, 0);
+		I915_WRITE(FDI_RXB_IMR, 0);
 	}

 	dev_priv->pch_irq_mask = ~hotplug_mask;

@@ -145,6 +145,8 @@
 #define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
 #define   MI_INVALIDATE_ISP	(1 << 5) /* invalidate indirect state pointers */
 #define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH	MI_INSTR(0x0b, 0)
+#define   MI_SUSPEND_FLUSH_EN	(1<<0)
 #define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
 #define MI_OVERLAY_FLIP		MI_INSTR(0x11,0)
 #define   MI_OVERLAY_CONTINUE	(0x0<<21)
@@ -159,6 +161,7 @@
 #define   MI_MM_SPACE_PHYSICAL	(0<<8)
 #define   MI_SAVE_EXT_STATE_EN	(1<<3)
 #define   MI_RESTORE_EXT_STATE_EN (1<<2)
+#define   MI_FORCE_RESTORE	(1<<1)
 #define   MI_RESTORE_INHIBIT	(1<<0)
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
@@ -288,6 +291,7 @@
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
 #define RING_ACTHD(base)	((base)+0x74)
 #define RING_NOPID(base)	((base)+0x94)
+#define RING_IMR(base)		((base)+0xa8)
 #define TAIL_ADDR		0x001FFFF8
 #define HEAD_WRAP_COUNT		0xFFE00000
 #define HEAD_WRAP_ONE		0x00200000
@@ -1130,9 +1134,50 @@
 #define RCBMINAVG		0x111a0
 #define RCUPEI			0x111b0
 #define RCDNEI			0x111b4
-#define MCHBAR_RENDER_STANDBY	0x111b8
-#define   RCX_SW_EXIT		(1<<23)
-#define   RSX_STATUS_MASK	0x00700000
+#define RSTDBYCTL		0x111b8
+#define   RS1EN			(1<<31)
+#define   RS2EN			(1<<30)
+#define   RS3EN			(1<<29)
+#define   D3RS3EN		(1<<28) /* Display D3 implies RS3 */
+#define   SWPROMORSX		(1<<27) /* RSx promotion timers ignored */
+#define   RCWAKERW		(1<<26) /* Resetwarn from PCH causes wakeup */
+#define   DPRSLPVREN		(1<<25) /* Fast voltage ramp enable */
+#define   GFXTGHYST		(1<<24) /* Hysteresis to allow trunk gating */
+#define   RCX_SW_EXIT		(1<<23) /* Leave RSx and prevent re-entry */
+#define   RSX_STATUS_MASK	(7<<20)
+#define   RSX_STATUS_ON		(0<<20)
+#define   RSX_STATUS_RC1	(1<<20)
+#define   RSX_STATUS_RC1E	(2<<20)
+#define   RSX_STATUS_RS1	(3<<20)
+#define   RSX_STATUS_RS2	(4<<20) /* aka rc6 */
+#define   RSX_STATUS_RSVD	(5<<20) /* deep rc6 unsupported on ilk */
+#define   RSX_STATUS_RS3	(6<<20) /* rs3 unsupported on ilk */
+#define   RSX_STATUS_RSVD2	(7<<20)
+#define   UWRCRSXE		(1<<19) /* wake counter limit prevents rsx */
+#define   RSCRP			(1<<18) /* rs requests control on rs1/2 reqs */
+#define   JRSC			(1<<17) /* rsx coupled to cpu c-state */
+#define   RS2INC0		(1<<16) /* allow rs2 in cpu c0 */
+#define   RS1CONTSAV_MASK	(3<<14)
+#define   RS1CONTSAV_NO_RS1	(0<<14) /* rs1 doesn't save/restore context */
+#define   RS1CONTSAV_RSVD	(1<<14)
+#define   RS1CONTSAV_SAVE_RS1	(2<<14) /* rs1 saves context */
+#define   RS1CONTSAV_FULL_RS1	(3<<14) /* rs1 saves and restores context */
+#define   NORMSLEXLAT_MASK	(3<<12)
+#define   SLOW_RS123		(0<<12)
+#define   SLOW_RS23		(1<<12)
+#define   SLOW_RS3		(2<<12)
+#define   NORMAL_RS123		(3<<12)
+#define   RCMODE_TIMEOUT	(1<<11) /* 0 is eval interval method */
+#define   IMPROMOEN		(1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define   RCENTSYNC		(1<<9) /* rs coupled to cpu c-state (3/6/7) */
+#define   STATELOCK		(1<<7) /* locked to rs_cstate if 0 */
+#define   RS_CSTATE_MASK	(3<<4)
+#define   RS_CSTATE_C367_RS1	(0<<4)
+#define   RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
+#define   RS_CSTATE_RSVD	(2<<4)
+#define   RS_CSTATE_C367_RS2	(3<<4)
+#define   REDSAVES		(1<<3) /* no context save if was idle during rs0 */
+#define   REDRESTORES		(1<<2) /* no restore if was idle during rs0 */
 #define VIDCTL			0x111c0
 #define VIDSTS			0x111c8
 #define VIDSTART		0x111cc /* 8 bits */
@@ -2345,8 +2390,13 @@
 /* Memory latency timer register */
 #define MLTR_ILK		0x11222
+#define   MLTR_WM1_SHIFT	0
+#define   MLTR_WM2_SHIFT	8
 /* the unit of memory self-refresh latency time is 0.5us */
 #define   ILK_SRLT_MASK		0x3f
+#define ILK_LATENCY(shift)	(I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
+#define ILK_READ_WM1_LATENCY()	ILK_LATENCY(MLTR_WM1_SHIFT)
+#define ILK_READ_WM2_LATENCY()	ILK_LATENCY(MLTR_WM2_SHIFT)

 /* define the fifo size on Ironlake */
 #define ILK_DISPLAY_FIFO	128
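As a worked example of the decode above (the register value here is made up, not read from real hardware): each latency field is a 6-bit count of 0.5 us units, WM1 at bit 0 and WM2 at bit 8, which is why the watermark code multiplies the raw count by 500 to get nanoseconds. A standalone sketch of the arithmetic:

/* Illustrative only: decodes a hypothetical MLTR_ILK readout by hand. */
#include <stdio.h>

#define ILK_SRLT_MASK	0x3f
#define MLTR_WM1_SHIFT	0
#define MLTR_WM2_SHIFT	8

int main(void)
{
	unsigned int mltr = 0x0c0a;	/* hypothetical register value */
	unsigned int wm1 = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;	/* 10 */
	unsigned int wm2 = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;	/* 12 */

	/* 0.5 us units: multiply by 500 to convert to nanoseconds */
	printf("WM1 latency: %u ns\n", wm1 * 500);	/* 5000 */
	printf("WM2 latency: %u ns\n", wm2 * 500);	/* 6000 */
	return 0;
}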
@@ -2728,12 +2778,41 @@
 /* PCH */

 /* south display engine interrupt */
+#define SDE_AUDIO_POWER_D	(1 << 27)
+#define SDE_AUDIO_POWER_C	(1 << 26)
+#define SDE_AUDIO_POWER_B	(1 << 25)
+#define SDE_AUDIO_POWER_SHIFT	(25)
+#define SDE_AUDIO_POWER_MASK	(7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS		(1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB	(1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA	(1 << 22)
+#define SDE_AUDIO_HDCP_MASK	(3 << 22)
+#define SDE_AUDIO_TRANSB	(1 << 21)
+#define SDE_AUDIO_TRANSA	(1 << 20)
+#define SDE_AUDIO_TRANS_MASK	(3 << 20)
+#define SDE_POISON		(1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB		(1 << 17)
+#define SDE_FDI_RXA		(1 << 16)
+#define SDE_FDI_MASK		(3 << 16)
+#define SDE_AUXD		(1 << 15)
+#define SDE_AUXC		(1 << 14)
+#define SDE_AUXB		(1 << 13)
+#define SDE_AUX_MASK		(7 << 13)
+/* 12 reserved */
 #define SDE_CRT_HOTPLUG		(1 << 11)
 #define SDE_PORTD_HOTPLUG	(1 << 10)
 #define SDE_PORTC_HOTPLUG	(1 << 9)
 #define SDE_PORTB_HOTPLUG	(1 << 8)
 #define SDE_SDVOB_HOTPLUG	(1 << 6)
 #define SDE_HOTPLUG_MASK	(0xf << 8)
+#define SDE_TRANSB_CRC_DONE	(1 << 5)
+#define SDE_TRANSB_CRC_ERR	(1 << 4)
+#define SDE_TRANSB_FIFO_UNDER	(1 << 3)
+#define SDE_TRANSA_CRC_DONE	(1 << 2)
+#define SDE_TRANSA_CRC_ERR	(1 << 1)
+#define SDE_TRANSA_FIFO_UNDER	(1 << 0)
+#define SDE_TRANS_MASK		(0x3f)

 /* CPT */
 #define SDE_CRT_HOTPLUG_CPT	(1 << 19)
 #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
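One subtlety in this layout worth noting: SDE_AUDIO_POWER_MASK covers three one-hot bits, so the value that pch_irq_handler shifts down and prints as a port number is really a bit mask (1, 2 or 4 for port B, C or D). A small standalone sketch with a made-up interrupt value:

/* Illustrative only: what the shifted audio-power field looks like. */
#include <stdio.h>

#define SDE_AUDIO_POWER_SHIFT	25
#define SDE_AUDIO_POWER_MASK	(7 << SDE_AUDIO_POWER_SHIFT)

int main(void)
{
	unsigned int pch_iir = 1u << 26;	/* hypothetical SDEIIR: port C event */
	unsigned int field = (pch_iir & SDE_AUDIO_POWER_MASK) >> SDE_AUDIO_POWER_SHIFT;

	printf("audio power field: %u\n", field);	/* prints 2: the bit for port C, not an index */
	return 0;
}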
@@ -3174,10 +3253,11 @@
 #define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
 #define  EDP_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
 /* SNB B-stepping */
-#define  EDP_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
-#define  EDP_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
-#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
-#define  EDP_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
+#define  EDP_LINK_TRAIN_400_600MV_0DB_SNB_B	(0x0<<22)
+#define  EDP_LINK_TRAIN_400MV_3_5DB_SNB_B	(0x1<<22)
+#define  EDP_LINK_TRAIN_400_600MV_6DB_SNB_B	(0x3a<<22)
+#define  EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B	(0x39<<22)
+#define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B	(0x38<<22)
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)

 #define FORCEWAKE				0xA18C
@@ -3239,6 +3319,7 @@
 #define GEN6_PCODE_MAILBOX			0x138124
 #define   GEN6_PCODE_READY			(1<<31)
+#define   GEN6_READ_OC_PARAMS			0xc
 #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE		0x9
 #define GEN6_PCODE_DATA				0x138128

@@ -740,7 +740,7 @@ void i915_restore_display(struct drm_device *dev)
		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
		I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
		I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
-		I915_WRITE(MCHBAR_RENDER_STANDBY,
+		I915_WRITE(RSTDBYCTL,
			   dev_priv->saveMCHBAR_RENDER_STANDBY);
	} else {
		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
@@ -811,7 +811,7 @@ int i915_save_state(struct drm_device *dev)
		dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
		dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
		dev_priv->saveMCHBAR_RENDER_STANDBY =
-			I915_READ(MCHBAR_RENDER_STANDBY);
+			I915_READ(RSTDBYCTL);
	} else {
		dev_priv->saveIER = I915_READ(IER);
		dev_priv->saveIMR = I915_READ(IMR);
@@ -822,10 +822,6 @@ int i915_save_state(struct drm_device *dev)
	if (IS_GEN6(dev))
		gen6_disable_rps(dev);

-	/* XXX disabling the clock gating breaks suspend on gm45
-	   intel_disable_clock_gating(dev);
-	 */
-
	/* Cache mode state */
	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

@@ -30,6 +30,7 @@
 #include "drm.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_edid.h"
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
@@ -287,8 +288,9 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
 }

-static bool intel_crt_detect_ddc(struct intel_crt *crt)
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
+	struct intel_crt *crt = intel_attached_crt(connector);
	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;

	/* CRT should always be at 0, but check anyway */
@@ -301,8 +303,26 @@ static bool intel_crt_detect_ddc(struct intel_crt *crt)
	}

	if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
-		DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-		return true;
+		struct edid *edid;
+		bool is_digital = false;
+
+		edid = drm_get_edid(connector,
+				    &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+		/*
+		 * This may be a DVI-I connector with a shared DDC
+		 * link between analog and digital outputs, so we
+		 * have to check the EDID input spec of the attached device.
+		 */
+		if (edid != NULL) {
+			is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+			connector->display_info.raw_edid = NULL;
+			kfree(edid);
+		}
+
+		if (!is_digital) {
+			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+			return true;
+		}
	}

	return false;
@@ -458,7 +478,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
		}
	}

-	if (intel_crt_detect_ddc(crt))
+	if (intel_crt_detect_ddc(connector))
		return connector_status_connected;

	if (!force)
@@ -472,7 +492,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
	crtc = intel_get_load_detect_pipe(&crt->base, connector,
					  NULL, &dpms_mode);
	if (crtc) {
-		if (intel_crt_detect_ddc(crt))
+		if (intel_crt_detect_ddc(connector))
			status = connector_status_connected;
		else
			status = intel_crt_load_detect(crtc, crt);

@@ -3418,15 +3418,16 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
 static bool ironlake_compute_wm0(struct drm_device *dev,
				 int pipe,
				 const struct intel_watermark_params *display,
-				 int display_latency,
+				 int display_latency_ns,
				 const struct intel_watermark_params *cursor,
-				 int cursor_latency,
+				 int cursor_latency_ns,
				 int *plane_wm,
				 int *cursor_wm)
 {
	struct drm_crtc *crtc;
-	int htotal, hdisplay, clock, pixel_size = 0;
-	int line_time_us, line_count, entries;
+	int htotal, hdisplay, clock, pixel_size;
+	int line_time_us, line_count;
+	int entries, tlb_miss;

	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc->fb == NULL || !crtc->enabled)
@@ -3438,7 +3439,10 @@ static bool ironlake_compute_wm0(struct drm_device *dev,
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
-	entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000;
+	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
@@ -3446,8 +3450,11 @@ static bool ironlake_compute_wm0(struct drm_device *dev,

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
-	line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000;
+	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
+	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
@@ -3456,14 +3463,109 @@ static bool ironlake_compute_wm0(struct drm_device *dev,
	return true;
 }

+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+				int fbc_wm, int display_wm, int cursor_wm,
+				const struct intel_watermark_params *display,
+				const struct intel_watermark_params *cursor)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+	if (fbc_wm > SNB_FBC_MAX_SRWM) {
+		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+			      fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+		/* fbc has its own way to disable FBC WM */
+		I915_WRITE(DISP_ARB_CTL,
+			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+		return false;
+	}
+
+	if (display_wm > display->max_wm) {
+		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
+		return false;
+	}
+
+	if (cursor_wm > cursor->max_wm) {
+		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+		return false;
+	}
+
+	if (!(fbc_wm || display_wm || cursor_wm)) {
+		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Compute watermark values of WM[1-3],
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level,
+				  int hdisplay, int htotal,
+				  int pixel_size, int clock, int latency_ns,
+				  const struct intel_watermark_params *display,
+				  const struct intel_watermark_params *cursor,
+				  int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+	unsigned long line_time_us;
+	int line_count, line_size;
+	int small, large;
+	int entries;
+
+	if (!latency_ns) {
+		*fbc_wm = *display_wm = *cursor_wm = 0;
+		return false;
+	}
+
+	line_time_us = (htotal * 1000) / clock;
+	line_count = (latency_ns / line_time_us + 1000) / 1000;
+	line_size = hdisplay * pixel_size;
+
+	/* Use the minimum of the small and large buffer method for primary */
+	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	large = line_count * line_size;
+
+	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+	*display_wm = entries + display->guard_size;
+
+	/*
+	 * Spec says:
+	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+	 */
+	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+	/* calculate the self-refresh watermark for display cursor */
+	entries = line_count * pixel_size * 64;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+
+	return ironlake_check_srwm(dev, level,
+				   *fbc_wm, *display_wm, *cursor_wm,
+				   display, cursor);
+}
+
 static void ironlake_update_wm(struct drm_device *dev,
			       int planea_clock, int planeb_clock,
-			       int sr_hdisplay, int sr_htotal,
+			       int hdisplay, int htotal,
			       int pixel_size)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
-	int plane_wm, cursor_wm, enabled;
-	int tmp;
+	int fbc_wm, plane_wm, cursor_wm, enabled;
+	int clock;

	enabled = 0;
	if (ironlake_compute_wm0(dev, 0,
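To make the self-refresh watermark math concrete, here is the ironlake_compute_srwm arithmetic run by hand on made-up inputs (a 1920-wide mode at 32 bpp, a 148500 kHz pixel clock, 6000 ns latency; the 64-byte cacheline and guard size of 2 are assumptions for the example, not values taken from the driver tables):

/* Illustrative only: the srwm arithmetic with example inputs. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define min(a, b)		((a) < (b) ? (a) : (b))

int main(void)
{
	int hdisplay = 1920, htotal = 2200, pixel_size = 4;
	int clock = 148500;		/* kHz */
	int latency_ns = 6000;		/* e.g. 12 * 500 from MLTR */
	int cacheline = 64, guard = 2;	/* assumed table values */

	long line_time_us = (htotal * 1000L) / clock;			/* 14 */
	long line_count = (latency_ns / line_time_us + 1000) / 1000;	/* 1 */
	long line_size = (long)hdisplay * pixel_size;			/* 7680 */

	long small = ((clock * pixel_size / 1000L) * latency_ns) / 1000;	/* 3564 */
	long large = line_count * line_size;				/* 7680 */

	long display_wm = DIV_ROUND_UP(min(small, large), cacheline) + guard;	/* 58 */
	long fbc_wm = DIV_ROUND_UP(display_wm * 64, line_size) + 2;		/* 3 */

	printf("display_wm=%ld fbc_wm=%ld\n", display_wm, fbc_wm);
	return 0;
}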
@@ -3498,152 +3600,49 @@ static void ironlake_update_wm(struct drm_device *dev,
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
-	tmp = 0;
-	if (enabled == 1) {
-		unsigned long line_time_us;
-		int small, large, plane_fbc;
-		int sr_clock, entries;
-		int line_count, line_size;
-		/* Read the self-refresh latency. The unit is 0.5us */
-		int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
-
-		sr_clock = planea_clock ? planea_clock : planeb_clock;
-		line_time_us = (sr_htotal * 1000) / sr_clock;
-
-		/* Use ns/us then divide to preserve precision */
-		line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
-			/ 1000;
-		line_size = sr_hdisplay * pixel_size;
-
-		/* Use the minimum of the small and large buffer method for primary */
-		small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
-		large = line_count * line_size;
-
-		entries = DIV_ROUND_UP(min(small, large),
-				       ironlake_display_srwm_info.cacheline_size);
-
-		plane_fbc = entries * 64;
-		plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
-
-		plane_wm = entries + ironlake_display_srwm_info.guard_size;
-		if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
-			plane_wm = ironlake_display_srwm_info.max_wm;
-
-		/* calculate the self-refresh watermark for display cursor */
-		entries = line_count * pixel_size * 64;
-		entries = DIV_ROUND_UP(entries,
-				       ironlake_cursor_srwm_info.cacheline_size);
-		cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
-		if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
-			cursor_wm = ironlake_cursor_srwm_info.max_wm;
-
-		/* configure watermark and enable self-refresh */
-		tmp = (WM1_LP_SR_EN |
-		       (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
-		       (plane_fbc << WM1_LP_FBC_SHIFT) |
-		       (plane_wm << WM1_LP_SR_SHIFT) |
-		       cursor_wm);
-		DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
-			      " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
-	}
-	I915_WRITE(WM1_LP_ILK, tmp);
-	/* XXX setup WM2 and WM3 */
-}
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- *
- * Also return true if all of those watermark values is 0, which is set by
- * sandybridge_compute_srwm, to indicate the latency is ZERO.
- */
-static bool sandybridge_check_srwm(struct drm_device *dev, int level,
-				   int fbc_wm, int display_wm, int cursor_wm)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
-		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
-	if (fbc_wm > SNB_FBC_MAX_SRWM) {
-		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
-			      fbc_wm, SNB_FBC_MAX_SRWM, level);
-
-		/* fbc has it's own way to disable FBC WM */
-		I915_WRITE(DISP_ARB_CTL,
-			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
-		return false;
-	}
-
-	if (display_wm > SNB_DISPLAY_MAX_SRWM) {
-		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
-			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
-		return false;
-	}
-
-	if (cursor_wm > SNB_CURSOR_MAX_SRWM) {
-		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
-			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
-		return false;
-	}
-
-	if (!(fbc_wm || display_wm || cursor_wm)) {
-		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
-		return false;
-	}
-
-	return true;
-}
-
-/*
- * Compute watermark values of WM[1-3],
- */
-static bool sandybridge_compute_srwm(struct drm_device *dev, int level,
-				     int hdisplay, int htotal, int pixel_size,
-				     int clock, int latency_ns, int *fbc_wm,
-				     int *display_wm, int *cursor_wm)
-{
-	unsigned long line_time_us;
-	int small, large;
-	int entries;
-	int line_count, line_size;
-
-	if (!latency_ns) {
-		*fbc_wm = *display_wm = *cursor_wm = 0;
-		return false;
-	}
-
-	line_time_us = (htotal * 1000) / clock;
-	line_count = (latency_ns / line_time_us + 1000) / 1000;
-	line_size = hdisplay * pixel_size;
-
-	/* Use the minimum of the small and large buffer method for primary */
-	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
-	large = line_count * line_size;
-
-	entries = DIV_ROUND_UP(min(small, large),
-			       sandybridge_display_srwm_info.cacheline_size);
-	*display_wm = entries + sandybridge_display_srwm_info.guard_size;
-
-	/*
-	 * Spec said:
-	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
-	 */
-	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
-	/* calculate the self-refresh watermark for display cursor */
-	entries = line_count * pixel_size * 64;
-	entries = DIV_ROUND_UP(entries,
-			       sandybridge_cursor_srwm_info.cacheline_size);
-	*cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size;
-
-	return sandybridge_check_srwm(dev, level,
-				      *fbc_wm, *display_wm, *cursor_wm);
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	if (enabled != 1)
+		return;
+
+	clock = planea_clock ? planea_clock : planeb_clock;
+
+	/* WM1 */
+	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+				   clock, ILK_READ_WM1_LATENCY() * 500,
+				   &ironlake_display_srwm_info,
+				   &ironlake_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM1_LP_ILK,
+		   WM1_LP_SR_EN |
+		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/* WM2 */
+	if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size,
+				   clock, ILK_READ_WM2_LATENCY() * 500,
+				   &ironlake_display_srwm_info,
+				   &ironlake_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM2_LP_ILK,
+		   WM2_LP_EN |
+		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/*
+	 * WM3 is unsupported on ILK, probably because we don't have latency
+	 * data for that power state
+	 */
 }
 static void sandybridge_update_wm(struct drm_device *dev,
@@ -3652,7 +3651,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
			   int pixel_size)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
-	int latency = SNB_READ_WM0_LATENCY();
+	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	int fbc_wm, plane_wm, cursor_wm, enabled;
	int clock;
@@ -3701,9 +3700,11 @@ static void sandybridge_update_wm(struct drm_device *dev,
	clock = planea_clock ? planea_clock : planeb_clock;

	/* WM1 */
-	if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
-				      clock, SNB_READ_WM1_LATENCY() * 500,
-				      &fbc_wm, &plane_wm, &cursor_wm))
+	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM1_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
@@ -3714,10 +3715,12 @@ static void sandybridge_update_wm(struct drm_device *dev,
		   cursor_wm);

	/* WM2 */
-	if (!sandybridge_compute_srwm(dev, 2,
-				      hdisplay, htotal, pixel_size,
-				      clock, SNB_READ_WM2_LATENCY() * 500,
-				      &fbc_wm, &plane_wm, &cursor_wm))
+	if (!ironlake_compute_srwm(dev, 2,
+				   hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM2_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
@@ -3728,10 +3731,12 @@ static void sandybridge_update_wm(struct drm_device *dev,
		   cursor_wm);

	/* WM3 */
-	if (!sandybridge_compute_srwm(dev, 3,
-				      hdisplay, htotal, pixel_size,
-				      clock, SNB_READ_WM3_LATENCY() * 500,
-				      &fbc_wm, &plane_wm, &cursor_wm))
+	if (!ironlake_compute_srwm(dev, 3,
+				   hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM3_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
@@ -3951,7 +3956,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
	int lane = 0, link_bw, bpp;
	/* CPU eDP doesn't require FDI link, so just set DP M/N
	   according to current link config */
-	if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) {
+	if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		target_clock = mode->clock;
		intel_edp_link_config(has_edp_encoder,
				      &lane, &link_bw);
@@ -5038,8 +5043,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-	int dpll = I915_READ(dpll_reg);
+	int dpll_reg = DPLL(pipe);
+	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;
@@ -5047,17 +5052,19 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
	if (!dev_priv->lvds_downclock_avail)
		return;

+	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* Unlock panel regs */
-		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
-			   PANEL_UNLOCK_REGS);
+		I915_WRITE(PP_CONTROL,
+			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
-		dpll = I915_READ(dpll_reg);
+		POSTING_READ(dpll_reg);
		intel_wait_for_vblank(dev, pipe);
+
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -5802,6 +5809,8 @@ static void intel_setup_outputs(struct drm_device *dev)
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}
+
+	intel_panel_setup_backlight(dev);
 }

 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -6145,6 +6154,10 @@ void intel_init_emon(struct drm_device *dev)

 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
+	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+	u32 pcu_mbox;
+	int cur_freq, min_freq, max_freq;
	int i;

	/* Here begins a magic sequence of register writes to enable
@@ -6216,6 +6229,29 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
		     500))
		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");

+	min_freq = (rp_state_cap & 0xff0000) >> 16;
+	max_freq = rp_state_cap & 0xff;
+	cur_freq = (gt_perf_status & 0xff00) >> 8;
+
+	/* Check for overclock support */
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+	if (pcu_mbox & (1<<31)) { /* OC supported */
+		max_freq = pcu_mbox & 0xff;
+		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 100);
+	}
+
+	/* In units of 100MHz */
+	dev_priv->max_delay = max_freq;
+	dev_priv->min_delay = min_freq;
+	dev_priv->cur_delay = cur_freq;
+
	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER,
		   GEN6_PM_MBOX_EVENT |
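The overclock probe in this hunk follows the visible pcode mailbox pattern: wait for GEN6_PCODE_READY to clear, write the command, read GEN6_PCODE_DATA, and treat bit 31 of the result as the "overclocking supported" flag with the maximum frequency (in 100 MHz units) in the low byte. A hedged sketch of just the decode step, with a made-up mailbox value:

/* Illustrative only: decoding a hypothetical GEN6_PCODE_DATA readout. */
#include <stdio.h>

int main(void)
{
	unsigned int pcu_mbox = (1u << 31) | 0x1e;	/* hypothetical: OC supported, 0x1e = 30 */
	unsigned int max_freq = 0x16;			/* 22 -> 2200 MHz default cap */

	if (pcu_mbox & (1u << 31))	/* OC supported */
		max_freq = pcu_mbox & 0xff;

	printf("max frequency: %u MHz\n", max_freq * 100);	/* 3000 MHz */
	return 0;
}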
@@ -6386,42 +6422,6 @@ void intel_enable_clock_gating(struct drm_device *dev)
	} else if (IS_I830(dev)) {
		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
	}
-
-	/*
-	 * GPU can automatically power down the render unit if given a page
-	 * to save state.
-	 */
-	if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */
-		if (dev_priv->renderctx == NULL)
-			dev_priv->renderctx = intel_alloc_context_page(dev);
-		if (dev_priv->renderctx) {
-			struct drm_i915_gem_object *obj = dev_priv->renderctx;
-			if (BEGIN_LP_RING(4) == 0) {
-				OUT_RING(MI_SET_CONTEXT);
-				OUT_RING(obj->gtt_offset |
-					 MI_MM_SPACE_GTT |
-					 MI_SAVE_EXT_STATE_EN |
-					 MI_RESTORE_EXT_STATE_EN |
-					 MI_RESTORE_INHIBIT);
-				OUT_RING(MI_NOOP);
-				OUT_RING(MI_FLUSH);
-				ADVANCE_LP_RING();
-			}
-		} else
-			DRM_DEBUG_KMS("Failed to allocate render context."
-				      "Disable RC6\n");
-	}
-
-	if (IS_GEN4(dev) && IS_MOBILE(dev)) {
-		if (dev_priv->pwrctx == NULL)
-			dev_priv->pwrctx = intel_alloc_context_page(dev);
-		if (dev_priv->pwrctx) {
-			struct drm_i915_gem_object *obj = dev_priv->pwrctx;
-			I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
-			I915_WRITE(MCHBAR_RENDER_STANDBY,
-				   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
-		}
-	}
 }

 void intel_disable_clock_gating(struct drm_device *dev)
@@ -6451,6 +6451,57 @@ void intel_disable_clock_gating(struct drm_device *dev)
	}
 }
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+	wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+		 10);
+	POSTING_READ(CCID);
+	I915_WRITE(PWRCTXA, 0);
+	POSTING_READ(PWRCTXA);
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+	POSTING_READ(RSTDBYCTL);
+	i915_gem_object_unpin(dev_priv->renderctx);
+	drm_gem_object_unreference(&dev_priv->renderctx->base);
+	dev_priv->renderctx = NULL;
+	i915_gem_object_unpin(dev_priv->pwrctx);
+	drm_gem_object_unreference(&dev_priv->pwrctx->base);
+	dev_priv->pwrctx = NULL;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/*
+	 * GPU can automatically power down the render unit if given a page
+	 * to save state.
+	 */
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		ironlake_disable_rc6(dev);
+		return;
+	}
+	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+	OUT_RING(MI_SET_CONTEXT);
+	OUT_RING(dev_priv->renderctx->gtt_offset |
+		 MI_MM_SPACE_GTT |
+		 MI_SAVE_EXT_STATE_EN |
+		 MI_RESTORE_EXT_STATE_EN |
+		 MI_RESTORE_INHIBIT);
+	OUT_RING(MI_SUSPEND_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_FLUSH);
+	ADVANCE_LP_RING();
+
+	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+}
+
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
 {
@@ -6665,12 +6716,7 @@ void intel_modeset_init(struct drm_device *dev)
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
-
-	/* set memory base */
-	if (IS_GEN2(dev))
-		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
-	else
-		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+	dev->mode_config.fb_base = dev->agp->base;

	if (IS_MOBILE(dev) || !IS_GEN2(dev))
		dev_priv->num_pipe = 2;
@@ -6698,6 +6744,21 @@ void intel_modeset_init(struct drm_device *dev)
	if (IS_GEN6(dev))
		gen6_enable_rps(dev_priv);

+	if (IS_IRONLAKE_M(dev)) {
+		dev_priv->renderctx = intel_alloc_context_page(dev);
+		if (!dev_priv->renderctx)
+			goto skip_rc6;
+		dev_priv->pwrctx = intel_alloc_context_page(dev);
+		if (!dev_priv->pwrctx) {
+			i915_gem_object_unpin(dev_priv->renderctx);
+			drm_gem_object_unreference(&dev_priv->renderctx->base);
+			dev_priv->renderctx = NULL;
+			goto skip_rc6;
+		}
+		ironlake_enable_rc6(dev);
+	}
+
+skip_rc6:
	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
		    (unsigned long)dev);
@@ -6734,7 +6795,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
	if (IS_GEN6(dev))
		gen6_disable_rps(dev);

-	intel_disable_clock_gating(dev);
+	if (IS_IRONLAKE_M(dev))
+		ironlake_disable_rc6(dev);

	mutex_unlock(&dev->struct_mutex);

@@ -1153,18 +1153,27 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
 static uint32_t
 intel_gen6_edp_signal_levels(uint8_t train_set)
 {
-	switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
-		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
-		return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
-		return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
-		return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
-		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
 }
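The reworked switch folds DP requests that share a register encoding into a single case: a sink asking for 600 mV with no pre-emphasis, for example, now gets the shared 400/600 mV value instead of falling through to the default. A small sketch of that case-folding on one made-up train_set byte (the field positions are assumptions here, taken as swing in bits 1:0 and pre-emphasis in bits 4:3 per the usual drm_dp_helper.h encoding):

/* Illustrative only: the case-folding idea with assumed DP field encodings. */
#include <stdio.h>

#define VS_400	0x0
#define VS_600	0x1
#define PE_0	(0x0 << 3)

int main(void)
{
	unsigned int train_set = VS_600 | PE_0;	/* hypothetical sink request */
	unsigned int signal_levels = train_set & (0x3 | (0x3 << 3));

	switch (signal_levels) {
	case VS_400 | PE_0:
	case VS_600 | PE_0:	/* both map to the shared 400/600 mV register value */
		printf("EDP_LINK_TRAIN_400_600MV_0DB_SNB_B\n");
		break;
	default:
		printf("unhandled\n");
	}
	return 0;
}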
@@ -1334,17 +1343,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool channel_eq = false;
-	int tries;
+	int tries, cr_tries;
	u32 reg;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	tries = 0;
+	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint32_t signal_levels;

+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			intel_dp_link_down(intel_dp);
+			break;
+		}
+
		if (IS_GEN6(dev) && is_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
@@ -1367,14 +1383,26 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
		if (!intel_dp_get_link_status(intel_dp))
			break;

+		/* Make sure clock is still ok */
+		if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			intel_dp_start_link_train(intel_dp);
+			cr_tries++;
+			continue;
+		}
+
		if (intel_channel_eq_ok(intel_dp)) {
			channel_eq = true;
			break;
		}

-		/* Try 5 times */
-		if (tries > 5)
-			break;
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			intel_dp_link_down(intel_dp);
+			intel_dp_start_link_train(intel_dp);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp);

@@ -257,6 +257,9 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
 extern u32 intel_panel_get_backlight(struct drm_device *dev);
 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+extern void intel_panel_setup_backlight(struct drm_device *dev);
+extern void intel_panel_enable_backlight(struct drm_device *dev);
+extern void intel_panel_disable_backlight(struct drm_device *dev);

 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);

@@ -62,6 +62,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
 {
	struct drm_device *dev = ifbdev->helper.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd mode_cmd;
@@ -77,7 +78,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
	mode_cmd.height = sizes->surface_height;

	mode_cmd.bpp = sizes->surface_bpp;
-	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
+	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
	mode_cmd.depth = sizes->surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
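The pitch fix is a bytes-per-pixel rounding correction: (bpp + 1) / 8 only rounds up for depths one bit below a byte boundary, while (bpp + 7) / 8 is the usual ceiling division. For the common 8/16/24/32 bpp cases both agree; with a 30 bpp surface, for instance, the old expression yields 3 bytes and the new one the correct 4. A one-line check:

/* Illustrative only: old vs. new bytes-per-pixel rounding. */
#include <stdio.h>

int main(void)
{
	int bpp = 30;	/* e.g. a 10-bit-per-channel mode */

	printf("old: %d bytes, new: %d bytes\n",
	       (bpp + 1) / 8,	/* 3 -- too small, rows would overlap */
	       (bpp + 7) / 8);	/* 4 -- correct ceiling */
	return 0;
}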
@@ -120,6 +121,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
@@ -127,10 +133,8 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
		goto out_unpin;
	}

	info->apertures->ranges[0].base = dev->mode_config.fb_base;
-	if (!IS_GEN2(dev))
-		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
-	else
-		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+	info->apertures->ranges[0].size =
+		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;
@@ -140,12 +144,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
		ret = -ENOSPC;
		goto out_unpin;
	}

-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}
-
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

@@ -106,7 +106,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
	POSTING_READ(lvds_reg);

-	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+	intel_panel_enable_backlight(dev);
 }

 static void intel_lvds_disable(struct intel_lvds *intel_lvds)
@@ -123,8 +123,7 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
		lvds_reg = LVDS;
	}

-	dev_priv->backlight_level = intel_panel_get_backlight(dev);
-	intel_panel_set_backlight(dev, 0);
+	intel_panel_disable_backlight(dev);

	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
@@ -375,6 +374,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
	}

 out:
+	if ((pfit_control & PFIT_ENABLE) == 0) {
+		pfit_control = 0;
+		pfit_pgm_ratios = 0;
+	}
	if (pfit_control != intel_lvds->pfit_control ||
	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
		intel_lvds->pfit_control = pfit_control;
@@ -398,8 +401,6 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);

-	dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
	/* We try to do the minimum that is necessary in order to unlock
	 * the registers for mode setting.
	 *
@@ -430,9 +431,6 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);

-	if (dev_priv->backlight_level == 0)
-		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
-
	/* Undo any unlocking done in prepare to prevent accidental
	 * adjustment of the registers.
	 */


@@ -250,3 +250,34 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight_enabled) {
+		dev_priv->backlight_level = intel_panel_get_backlight(dev);
+		dev_priv->backlight_enabled = false;
+	}
+
+	intel_panel_set_backlight(dev, 0);
+}
+
+void intel_panel_enable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight_level == 0)
+		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+	dev_priv->backlight_enabled = true;
+}
+
+void intel_panel_setup_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+	dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
+}
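
These helpers turn the save/restore of the user's backlight level into a single stateful pair instead of open-coded reads in every caller (the LVDS hunks above become their first users). A minimal sketch of the invariant they enforce, with a hypothetical register write standing in for the PWM update:

#include <stdbool.h>

/* hypothetical stand-ins for the PWM duty-cycle register */
static unsigned int hw_level;
static void hw_set_duty_cycle(unsigned int level) { hw_level = level; }

struct backlight {
	unsigned int level;	/* last user-visible level */
	unsigned int max;
	bool enabled;
};

static void backlight_disable(struct backlight *bl)
{
	if (bl->enabled) {
		bl->level = hw_level;	/* save before forcing off */
		bl->enabled = false;
	}
	hw_set_duty_cycle(0);
}

static void backlight_enable(struct backlight *bl)
{
	if (bl->level == 0)		/* never restore to "off" */
		bl->level = bl->max;
	hw_set_duty_cycle(bl->level);
	bl->enabled = true;
}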


@@ -48,7 +48,7 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
 	return seqno;
 }
-static void
+static int
 render_ring_flush(struct intel_ring_buffer *ring,
 		  u32 invalidate_domains,
 		  u32 flush_domains)
@@ -56,6 +56,7 @@ render_ring_flush(struct intel_ring_buffer *ring,
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
+	int ret;
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
@@ -116,12 +117,16 @@ render_ring_flush(struct intel_ring_buffer *ring,
 #if WATCH_EXEC
 		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-		if (intel_ring_begin(ring, 2) == 0) {
-			intel_ring_emit(ring, cmd);
-			intel_ring_emit(ring, MI_NOOP);
-			intel_ring_advance(ring);
-		}
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, cmd);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 	}
+
+	return 0;
 }
 static void ring_write_tail(struct intel_ring_buffer *ring,
@@ -480,26 +485,56 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
 	return pc->cpu_page[0];
 }
+static void
+ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask &= ~mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask |= mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask &= ~mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
+static void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask |= mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
 static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	if (!dev->irq_enabled)
 		return false;
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv,
+			ironlake_enable_irq(dev_priv,
 					GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&ring->irq_lock);
 	return true;
 }
@@ -508,20 +543,18 @@ static void
 render_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv,
+			ironlake_disable_irq(dev_priv,
 					GT_USER_INTERRUPT |
 					GT_PIPE_NOTIFY);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&ring->irq_lock);
 }
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -534,19 +567,24 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 	POSTING_READ(mmio);
 }
-static void
+static int
 bsd_ring_flush(struct intel_ring_buffer *ring,
 	       u32 invalidate_domains,
 	       u32 flush_domains)
 {
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
-
-	if (intel_ring_begin(ring, 2) == 0) {
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	}
+	int ret;
+
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	return 0;
 }
 static int
@@ -577,18 +615,15 @@ static bool
 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	if (!dev->irq_enabled)
 		return false;
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		ironlake_enable_graphics_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0)
+		ironlake_enable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
 	return true;
 }
@@ -597,15 +632,47 @@ static void
 ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		ironlake_disable_graphics_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0)
+		ironlake_disable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
+}
+
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
+		ring->irq_mask &= ~rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_enable_irq(dev_priv, gflag);
+	}
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
+		ring->irq_mask |= rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_disable_irq(dev_priv, gflag);
+	}
+	spin_unlock(&ring->irq_lock);
 }
 static bool
@@ -748,6 +815,9 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
+	spin_lock_init(&ring->irq_lock);
+	ring->irq_mask = ~0;
+
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
 		if (ret)
@@ -785,6 +855,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	if (ret)
 		goto err_unmap;
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
 	return 0;
 err_unmap:
@@ -827,8 +905,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
 	unsigned int *virt;
-	int rem;
-	rem = ring->size - ring->tail;
+	int rem = ring->size - ring->tail;
 	if (ring->space < rem) {
 		int ret = intel_wait_ring_buffer(ring, rem);
@@ -895,7 +972,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	int n = 4*num_dwords;
 	int ret;
-	if (unlikely(ring->tail + n > ring->size)) {
+	if (unlikely(ring->tail + n > ring->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
@@ -973,20 +1050,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
 }
-static void gen6_ring_flush(struct intel_ring_buffer *ring,
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
 			   u32 invalidate_domains,
 			   u32 flush_domains)
 {
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
-
-	if (intel_ring_begin(ring, 4) == 0) {
-		intel_ring_emit(ring, MI_FLUSH_DW);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_advance(ring);
-	}
+	int ret;
+
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 static int
@@ -1007,16 +1089,36 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
+static bool
+gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_get_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static void
+gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_put_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
 static bool
 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return gen6_ring_get_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
 }
 static void
 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+	return gen6_ring_put_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
 }
 /* ring buffer for Video Codec for Gen6+ */
@@ -1040,13 +1142,17 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 static bool
 blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
+	return gen6_ring_get_irq(ring,
+				 GT_BLT_USER_INTERRUPT,
+				 GEN6_BLITTER_USER_INTERRUPT);
 }
 static void
 blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
+	gen6_ring_put_irq(ring,
+			  GT_BLT_USER_INTERRUPT,
+			  GEN6_BLITTER_USER_INTERRUPT);
 }
@@ -1115,20 +1221,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
 	return intel_ring_begin(ring, 4);
 }
-static void blt_ring_flush(struct intel_ring_buffer *ring,
+static int blt_ring_flush(struct intel_ring_buffer *ring,
 			   u32 invalidate_domains,
 			   u32 flush_domains)
 {
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
-		return;
-
-	if (blt_ring_begin(ring, 4) == 0) {
-		intel_ring_emit(ring, MI_FLUSH_DW);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_advance(ring);
-	}
+	int ret;
+
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
+
+	ret = blt_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -1165,6 +1276,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	*ring = render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;
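
Throughout this file the atomic_t reference count is replaced by a plain counter guarded by a per-ring spinlock. The point is that the count transition and the hardware (un)mask write must be observed together: with atomic_inc_return() alone, a second caller could see a nonzero count and return while the interrupt was still masked. A minimal sketch of the pattern, with the locking shown as comments and hypothetical mask helpers standing in for the GTIMR/IMR writes:

#include <stdbool.h>

/* hypothetical stand-ins for the interrupt mask register writes */
static void hw_unmask(unsigned int mask) { (void)mask; }
static void hw_mask(unsigned int mask) { (void)mask; }

struct ring_irq {
	/* a spinlock would live here; taken around both steps below */
	unsigned int refcount;
};

static bool ring_irq_get(struct ring_irq *r, unsigned int mask)
{
	/* spin_lock(&r->lock); */
	if (r->refcount++ == 0)
		hw_unmask(mask);	/* first user unmasks exactly once */
	/* spin_unlock(&r->lock); */
	return true;
}

static void ring_irq_put(struct ring_irq *r, unsigned int mask)
{
	/* spin_lock(&r->lock); */
	if (--r->refcount == 0)
		hw_mask(mask);		/* last user masks again */
	/* spin_unlock(&r->lock); */
}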


@@ -16,21 +16,24 @@ struct intel_hw_status_page {
 #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
-#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
-#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
-#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
-#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
-#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
-#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
-#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
+#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))
 struct intel_ring_buffer {
 	const char *name;
@@ -49,12 +52,15 @@ struct intel_ring_buffer {
 	u32		tail;
 	int		space;
 	int		size;
+	int		effective_size;
 	struct intel_hw_status_page status_page;
+	spinlock_t	irq_lock;
+	u32		irq_refcount;
+	u32		irq_mask;
 	u32		irq_seqno;		/* last seq seem at irq time */
 	u32		waiting_seqno;
 	u32		sync_seqno[I915_NUM_RINGS-1];
-	atomic_t	irq_refcount;
 	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
 	void		(*irq_put)(struct intel_ring_buffer *ring);
@@ -62,9 +68,9 @@ struct intel_ring_buffer {
 	void		(*write_tail)(struct intel_ring_buffer *ring,
 				      u32 value);
-	void		(*flush)(struct intel_ring_buffer *ring,
+	int __must_check (*flush)(struct intel_ring_buffer *ring,
 				 u32 invalidate_domains,
 				 u32 flush_domains);
 	int		(*add_request)(struct intel_ring_buffer *ring,
 				       u32 *seqno);
 	u32		(*get_seqno)(struct intel_ring_buffer *ring);
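
The new effective_size field gives intel_ring_begin() a wrap threshold that can be smaller than the allocation, which is how the i830 TAIL erratum handled earlier in this series is sidestepped. A reduced sketch (illustrative struct, not the driver's), assuming the 128-byte slack used above:

struct ring {
	int size;		/* full buffer size */
	int effective_size;	/* size minus erratum slack */
	int tail;
};

static void ring_init_sizes(struct ring *r, int size, int is_i830)
{
	r->size = size;
	r->effective_size = size;
	if (is_i830)
		r->effective_size -= 128;	/* keep TAIL clear of the end */
}

static int ring_needs_wrap(const struct ring *r, int bytes)
{
	/* wrap early so TAIL never lands in the reserved tail bytes */
	return r->tail + bytes > r->effective_size;
}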


@@ -1024,9 +1024,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	if (!intel_sdvo_set_target_input(intel_sdvo))
 		return;
-	if (intel_sdvo->has_hdmi_monitor &&
-	    !intel_sdvo_set_avi_infoframe(intel_sdvo))
-		return;
+	if (intel_sdvo->has_hdmi_monitor) {
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+		intel_sdvo_set_colorimetry(intel_sdvo,
+					   SDVO_COLORIMETRY_RGB256);
+		intel_sdvo_set_avi_infoframe(intel_sdvo);
+	} else
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 	if (intel_sdvo->is_tv &&
 	    !intel_sdvo_set_tv_format(intel_sdvo))
@@ -1398,6 +1402,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 	intel_sdvo->attached_output = response;
+	intel_sdvo->has_hdmi_monitor = false;
+	intel_sdvo->has_hdmi_audio = false;
+
 	if ((intel_sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
 	else if (response & SDVO_TMDS_MASK)
@@ -1922,20 +1929,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
 static bool
 intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 {
-	int is_hdmi;
-
-	if (!intel_sdvo_check_supp_encode(intel_sdvo))
-		return false;
-
-	if (!intel_sdvo_set_target_output(intel_sdvo,
-			device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
-		return false;
-
-	is_hdmi = 0;
-	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
-		return false;
-
-	return !!is_hdmi;
+	return intel_sdvo_check_supp_encode(intel_sdvo);
 }
 static u8
@@ -2037,12 +2031,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
-		/* enable hdmi encoding mode if supported */
-		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
-		intel_sdvo_set_colorimetry(intel_sdvo,
-					   SDVO_COLORIMETRY_RGB256);
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
 		intel_sdvo->is_hdmi = true;
 	}
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |


@@ -2963,6 +2963,7 @@ config TILE_NET
 config XEN_NETDEV_FRONTEND
 	tristate "Xen network device frontend driver"
 	depends on XEN
+	select XEN_XENBUS_FRONTEND
 	default y
 	help
 	  The network device frontend driver allows the kernel to


@@ -45,6 +45,7 @@ config XEN_PCIDEV_FRONTEND
 	depends on PCI && X86 && XEN
 	select HOTPLUG
 	select PCI_XEN
+	select XEN_XENBUS_FRONTEND
 	default y
 	help
 	  The PCI device frontend driver allows the kernel to import arbitrary


@@ -29,6 +29,14 @@ config XEN_DEV_EVTCHN
 	  firing.
 	  If in doubt, say yes.
+config XEN_BACKEND
+	bool "Backend driver support"
+	depends on XEN_DOM0
+	default y
+	help
+	  Support for backend device drivers that provide I/O services
+	  to other virtual machines.
+
 config XENFS
 	tristate "Xen filesystem"
 	default y
@@ -62,6 +70,9 @@ config XEN_SYS_HYPERVISOR
 	  virtual environment, /sys/hypervisor will still be present,
 	  but will have no xen contents.
+config XEN_XENBUS_FRONTEND
+	tristate
+
 config XEN_PLATFORM_PCI
 	tristate "xen platform pci device driver"
 	depends on XEN_PVHVM


@@ -5,3 +5,8 @@ xenbus-objs += xenbus_client.o
 xenbus-objs += xenbus_comms.o
 xenbus-objs += xenbus_xs.o
 xenbus-objs += xenbus_probe.o
+
+xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
+xenbus-objs += $(xenbus-be-objs-y)
+
+obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o


@@ -56,7 +56,6 @@
 #include <xen/events.h>
 #include <xen/page.h>
-#include <xen/platform_pci.h>
 #include <xen/hvm.h>
 #include "xenbus_comms.h"
@@ -73,15 +72,6 @@ static unsigned long xen_store_mfn;
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
-static void wait_for_devices(struct xenbus_driver *xendrv);
-
-static int xenbus_probe_frontend(const char *type, const char *name);
-
-static void xenbus_dev_shutdown(struct device *_dev);
-
-static int xenbus_dev_suspend(struct device *dev, pm_message_t state);
-static int xenbus_dev_resume(struct device *dev);
-
 /* If something in array of ids matches this device, return it. */
 static const struct xenbus_device_id *
 match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
@@ -102,34 +92,7 @@ int xenbus_match(struct device *_dev, struct device_driver *_drv)
 	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
 }
+EXPORT_SYMBOL_GPL(xenbus_match);
-
-static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env)
-{
-	struct xenbus_device *dev = to_xenbus_device(_dev);
-
-	if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
-		return -ENOMEM;
-
-	return 0;
-}
-
-/* device/<type>/<id> => <type>-<id> */
-static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
-{
-	nodename = strchr(nodename, '/');
-	if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
-		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
-		return -EINVAL;
-	}
-
-	strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
-	if (!strchr(bus_id, '/')) {
-		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
-		return -EINVAL;
-	}
-	*strchr(bus_id, '/') = '-';
-	return 0;
-}
 static void free_otherend_details(struct xenbus_device *dev)
@@ -149,7 +112,30 @@ static void free_otherend_watch(struct xenbus_device *dev)
 }
-int read_otherend_details(struct xenbus_device *xendev,
+static int talk_to_otherend(struct xenbus_device *dev)
+{
+	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
+
+	free_otherend_watch(dev);
+	free_otherend_details(dev);
+
+	return drv->read_otherend_details(dev);
+}
+
+
+static int watch_otherend(struct xenbus_device *dev)
+{
+	struct xen_bus_type *bus =
+		container_of(dev->dev.bus, struct xen_bus_type, bus);
+
+	return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
+				    bus->otherend_changed,
+				    "%s/%s", dev->otherend, "state");
+}
+
+
+int xenbus_read_otherend_details(struct xenbus_device *xendev,
 				 char *id_node, char *path_node)
 {
 	int err = xenbus_gather(XBT_NIL, xendev->nodename,
@@ -174,39 +160,11 @@ int xenbus_read_otherend_details(struct xenbus_device *xendev,
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
-
-static int read_backend_details(struct xenbus_device *xendev)
-{
-	return read_otherend_details(xendev, "backend-id", "backend");
-}
-
-static struct device_attribute xenbus_dev_attrs[] = {
-	__ATTR_NULL
-};
-
-/* Bus type for frontend drivers. */
-static struct xen_bus_type xenbus_frontend = {
-	.root = "device",
-	.levels = 2,		/* device/type/<id> */
-	.get_bus_id = frontend_bus_id,
-	.probe = xenbus_probe_frontend,
-	.bus = {
-		.name      = "xen",
-		.match     = xenbus_match,
-		.uevent    = xenbus_uevent,
-		.probe     = xenbus_dev_probe,
-		.remove    = xenbus_dev_remove,
-		.shutdown  = xenbus_dev_shutdown,
-		.dev_attrs = xenbus_dev_attrs,
-		.suspend   = xenbus_dev_suspend,
-		.resume    = xenbus_dev_resume,
-	},
-};
-
-static void otherend_changed(struct xenbus_watch *watch,
-			     const char **vec, unsigned int len)
+void xenbus_otherend_changed(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len,
+			     int ignore_on_shutdown)
 {
 	struct xenbus_device *dev =
 		container_of(watch, struct xenbus_device, otherend_watch);
@@ -234,11 +192,7 @@ void xenbus_otherend_changed(struct xenbus_watch *watch,
 	 * work that can fail e.g., when the rootfs is gone.
 	 */
 	if (system_state > SYSTEM_RUNNING) {
-		struct xen_bus_type *bus = bus;
-		bus = container_of(dev->dev.bus, struct xen_bus_type, bus);
-		/* If we're frontend, drive the state machine to Closed. */
-		/* This should cause the backend to release our resources. */
-		if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
+		if (ignore_on_shutdown && (state == XenbusStateClosing))
 			xenbus_frontend_closed(dev);
 		return;
 	}
@@ -246,25 +200,7 @@ void xenbus_otherend_changed(struct xenbus_watch *watch,
 	if (drv->otherend_changed)
 		drv->otherend_changed(dev, state);
 }
+EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
-
-static int talk_to_otherend(struct xenbus_device *dev)
-{
-	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-
-	free_otherend_watch(dev);
-	free_otherend_details(dev);
-
-	return drv->read_otherend_details(dev);
-}
-
-
-static int watch_otherend(struct xenbus_device *dev)
-{
-	return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
-				    "%s/%s", dev->otherend, "state");
-}
 int xenbus_dev_probe(struct device *_dev)
 {
@@ -308,8 +244,9 @@ int xenbus_dev_probe(struct device *_dev)
 fail:
 	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
 	xenbus_switch_state(dev, XenbusStateClosed);
-	return -ENODEV;
+	return err;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_probe);
 int xenbus_dev_remove(struct device *_dev)
 {
@@ -327,8 +264,9 @@ int xenbus_dev_remove(struct device *_dev)
 	xenbus_switch_state(dev, XenbusStateClosed);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_remove);
-static void xenbus_dev_shutdown(struct device *_dev)
+void xenbus_dev_shutdown(struct device *_dev)
 {
 	struct xenbus_device *dev = to_xenbus_device(_dev);
 	unsigned long timeout = 5*HZ;
@@ -349,6 +287,7 @@ void xenbus_dev_shutdown(struct device *_dev)
 out:
 	put_device(&dev->dev);
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
 int xenbus_register_driver_common(struct xenbus_driver *drv,
 				  struct xen_bus_type *bus,
@@ -362,25 +301,7 @@ int xenbus_register_driver_common(struct xenbus_driver *drv,
 	return driver_register(&drv->driver);
 }
+EXPORT_SYMBOL_GPL(xenbus_register_driver_common);
-
-int __xenbus_register_frontend(struct xenbus_driver *drv,
-			       struct module *owner, const char *mod_name)
-{
-	int ret;
-
-	drv->read_otherend_details = read_backend_details;
-
-	ret = xenbus_register_driver_common(drv, &xenbus_frontend,
-					    owner, mod_name);
-	if (ret)
-		return ret;
-
-	/* If this driver is loaded as a module wait for devices to attach. */
-	wait_for_devices(drv);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
 void xenbus_unregister_driver(struct xenbus_driver *drv)
 {
@@ -551,24 +472,7 @@ fail:
 	kfree(xendev);
 	return err;
 }
+EXPORT_SYMBOL_GPL(xenbus_probe_node);
-
-/* device/<typename>/<name> */
-static int xenbus_probe_frontend(const char *type, const char *name)
-{
-	char *nodename;
-	int err;
-
-	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
-			     xenbus_frontend.root, type, name);
-	if (!nodename)
-		return -ENOMEM;
-
-	DPRINTK("%s", nodename);
-
-	err = xenbus_probe_node(&xenbus_frontend, type, nodename);
-	kfree(nodename);
-	return err;
-}
 static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
 {
@@ -582,10 +486,11 @@ static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
 		return PTR_ERR(dir);
 	for (i = 0; i < dir_n; i++) {
-		err = bus->probe(type, dir[i]);
+		err = bus->probe(bus, type, dir[i]);
 		if (err)
 			break;
 	}
+
 	kfree(dir);
 	return err;
 }
@@ -605,9 +510,11 @@ int xenbus_probe_devices(struct xen_bus_type *bus)
 		if (err)
 			break;
 	}
+
 	kfree(dir);
 	return err;
 }
+EXPORT_SYMBOL_GPL(xenbus_probe_devices);
 static unsigned int char_count(const char *str, char c)
 {
@@ -670,32 +577,18 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_changed);
-static void frontend_changed(struct xenbus_watch *watch,
-			     const char **vec, unsigned int len)
-{
-	DPRINTK("");
-
-	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
-}
-
-/* We watch for devices appearing and vanishing. */
-static struct xenbus_watch fe_watch = {
-	.node = "device",
-	.callback = frontend_changed,
-};
-
-static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
+int xenbus_dev_suspend(struct device *dev, pm_message_t state)
 {
 	int err = 0;
 	struct xenbus_driver *drv;
-	struct xenbus_device *xdev;
+	struct xenbus_device *xdev
+		= container_of(dev, struct xenbus_device, dev);
-	DPRINTK("");
+	DPRINTK("%s", xdev->nodename);
 	if (dev->driver == NULL)
 		return 0;
 	drv = to_xenbus_driver(dev->driver);
-	xdev = container_of(dev, struct xenbus_device, dev);
 	if (drv->suspend)
 		err = drv->suspend(xdev, state);
 	if (err)
@@ -703,21 +596,20 @@ int xenbus_dev_suspend(struct device *dev, pm_message_t state)
 		       "xenbus: suspend %s failed: %i\n", dev_name(dev), err);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_suspend);
-static int xenbus_dev_resume(struct device *dev)
+int xenbus_dev_resume(struct device *dev)
 {
 	int err;
 	struct xenbus_driver *drv;
-	struct xenbus_device *xdev;
+	struct xenbus_device *xdev
+		= container_of(dev, struct xenbus_device, dev);
-	DPRINTK("");
+	DPRINTK("%s", xdev->nodename);
 	if (dev->driver == NULL)
 		return 0;
 	drv = to_xenbus_driver(dev->driver);
-	xdev = container_of(dev, struct xenbus_device, dev);
 	err = talk_to_otherend(xdev);
 	if (err) {
 		printk(KERN_WARNING
@@ -748,6 +640,7 @@ int xenbus_dev_resume(struct device *dev)
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_resume);
 /* A flag to determine if xenstored is 'ready' (i.e. has started) */
 int xenstored_ready = 0;
@@ -776,11 +669,6 @@ void xenbus_probe(struct work_struct *unused)
 {
 	xenstored_ready = 1;
-	/* Enumerate devices in xenstore and watch for changes. */
-	xenbus_probe_devices(&xenbus_frontend);
-	register_xenbus_watch(&fe_watch);
-	xenbus_backend_probe_and_watch();
-
 	/* Notify others that xenstore is up */
 	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
 }
@@ -809,16 +697,7 @@ static int __init xenbus_init(void)
 	err = -ENODEV;
 	if (!xen_domain())
-		goto out_error;
-
-	/* Register ourselves with the kernel bus subsystem */
-	err = bus_register(&xenbus_frontend.bus);
-	if (err)
-		goto out_error;
-
-	err = xenbus_backend_bus_register();
-	if (err)
-		goto out_unreg_front;
+		return err;
 	/*
 	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
@@ -874,7 +753,7 @@ static int __init xenbus_init(void)
 	if (err) {
 		printk(KERN_WARNING
 		       "XENBUS: Error initializing xenstore comms: %i\n", err);
-		goto out_unreg_back;
+		goto out_error;
 	}
 #ifdef CONFIG_XEN_COMPAT_XENFS
@@ -887,133 +766,13 @@ static int __init xenbus_init(void)
 	return 0;
-out_unreg_back:
-	xenbus_backend_bus_unregister();
-
-out_unreg_front:
-	bus_unregister(&xenbus_frontend.bus);
-
 out_error:
 	if (page != 0)
 		free_page(page);
 	return err;
 }
 postcore_initcall(xenbus_init);
 MODULE_LICENSE("GPL");
-
-static int is_device_connecting(struct device *dev, void *data)
-{
-	struct xenbus_device *xendev = to_xenbus_device(dev);
-	struct device_driver *drv = data;
-	struct xenbus_driver *xendrv;
-
-	/*
-	 * A device with no driver will never connect. We care only about
-	 * devices which should currently be in the process of connecting.
-	 */
-	if (!dev->driver)
-		return 0;
-
-	/* Is this search limited to a particular driver? */
-	if (drv && (dev->driver != drv))
-		return 0;
-
-	xendrv = to_xenbus_driver(dev->driver);
-	return (xendev->state < XenbusStateConnected ||
-		(xendev->state == XenbusStateConnected &&
-		 xendrv->is_ready && !xendrv->is_ready(xendev)));
-}
-
-static int exists_connecting_device(struct device_driver *drv)
-{
-	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-				is_device_connecting);
-}
-
-static int print_device_status(struct device *dev, void *data)
-{
-	struct xenbus_device *xendev = to_xenbus_device(dev);
-	struct device_driver *drv = data;
-
-	/* Is this operation limited to a particular driver? */
-	if (drv && (dev->driver != drv))
-		return 0;
-
-	if (!dev->driver) {
-		/* Information only: is this too noisy? */
-		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
-		       xendev->nodename);
-	} else if (xendev->state < XenbusStateConnected) {
-		enum xenbus_state rstate = XenbusStateUnknown;
-		if (xendev->otherend)
-			rstate = xenbus_read_driver_state(xendev->otherend);
-		printk(KERN_WARNING "XENBUS: Timeout connecting "
-		       "to device: %s (local state %d, remote state %d)\n",
-		       xendev->nodename, xendev->state, rstate);
-	}
-
-	return 0;
-}
-
-/* We only wait for device setup after most initcalls have run. */
-static int ready_to_wait_for_devices;
-
-/*
- * On a 5-minute timeout, wait for all devices currently configured.  We need
- * to do this to guarantee that the filesystems and / or network devices
- * needed for boot are available, before we can allow the boot to proceed.
- *
- * This needs to be on a late_initcall, to happen after the frontend device
- * drivers have been initialised, but before the root fs is mounted.
- *
- * A possible improvement here would be to have the tools add a per-device
- * flag to the store entry, indicating whether it is needed at boot time.
- * This would allow people who knew what they were doing to accelerate their
- * boot slightly, but of course needs tools or manual intervention to set up
- * those flags correctly.
- */
-static void wait_for_devices(struct xenbus_driver *xendrv)
-{
-	unsigned long start = jiffies;
-	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
-	unsigned int seconds_waited = 0;
-
-	if (!ready_to_wait_for_devices || !xen_domain())
-		return;
-
-	while (exists_connecting_device(drv)) {
-		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
-			if (!seconds_waited)
-				printk(KERN_WARNING "XENBUS: Waiting for "
-				       "devices to initialise: ");
-			seconds_waited += 5;
-			printk("%us...", 300 - seconds_waited);
-			if (seconds_waited == 300)
-				break;
-		}
-
-		schedule_timeout_interruptible(HZ/10);
-	}
-
-	if (seconds_waited)
-		printk("\n");
-
-	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-			 print_device_status);
-}
-
-#ifndef MODULE
-static int __init boot_wait_for_devices(void)
-{
-	if (xen_hvm_domain() && !xen_platform_pci_unplug)
-		return -ENODEV;
-
-	ready_to_wait_for_devices = 1;
-	wait_for_devices(NULL);
-	return 0;
-}
-
-late_initcall(boot_wait_for_devices);
-#endif
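
Taken together, the hunks above strip everything frontend- and backend-specific out of the common probe code and route bus-specific behaviour through the xen_bus_type callbacks (note the new bus argument to ->probe and the otherend_changed hook). A reduced sketch of that dispatch shape, with hypothetical names, is:

struct bus_ops {
	const char *root;
	/* each bus supplies its own probe; common code stays generic */
	int (*probe)(struct bus_ops *bus, const char *type,
		     const char *dir);
};

/* common walker: no longer knows whether it is the frontend or
 * backend bus it is enumerating */
static int probe_device_type(struct bus_ops *bus, const char *type,
			     const char **dirs, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = bus->probe(bus, type, dirs[i]);
		if (err)
			break;
	}
	return err;
}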


@@ -36,26 +36,15 @@
 #define XEN_BUS_ID_SIZE			20
-#ifdef CONFIG_XEN_BACKEND
-extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
-extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
-extern void xenbus_backend_probe_and_watch(void);
-extern int xenbus_backend_bus_register(void);
-extern void xenbus_backend_bus_unregister(void);
-#else
-static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
-static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
-static inline void xenbus_backend_probe_and_watch(void) {}
-static inline int xenbus_backend_bus_register(void) { return 0; }
-static inline void xenbus_backend_bus_unregister(void) {}
-#endif
-
 struct xen_bus_type
 {
 	char *root;
 	unsigned int levels;
 	int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
-	int (*probe)(const char *type, const char *dir);
+	int (*probe)(struct xen_bus_type *bus, const char *type,
+		     const char *dir);
+	void (*otherend_changed)(struct xenbus_watch *watch, const char **vec,
+				 unsigned int len);
 	struct bus_type bus;
 };
@@ -73,4 +62,16 @@ extern int xenbus_probe_devices(struct xen_bus_type *bus);
 extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
+extern void xenbus_dev_shutdown(struct device *_dev);
+
+extern int xenbus_dev_suspend(struct device *dev, pm_message_t state);
+extern int xenbus_dev_resume(struct device *dev);
+
+extern void xenbus_otherend_changed(struct xenbus_watch *watch,
+				    const char **vec, unsigned int len,
+				    int ignore_on_shutdown);
+
+extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
+					char *id_node, char *path_node);
+
 #endif


@@ -0,0 +1,276 @@
/******************************************************************************
* Talks to Xen Store to figure out what devices we have (backend half).
*
* Copyright (C) 2005 Rusty Russell, IBM Corporation
* Copyright (C) 2005 Mike Wray, Hewlett-Packard
* Copyright (C) 2005, 2006 XenSource Ltd
* Copyright (C) 2007 Solarflare Communications, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#define DPRINTK(fmt, args...) \
pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
__func__, __LINE__, ##args)
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <asm/hypervisor.h>
#include <xen/xenbus.h>
#include <xen/features.h>
#include "xenbus_comms.h"
#include "xenbus_probe.h"
/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
{
int domid, err;
const char *devid, *type, *frontend;
unsigned int typelen;
type = strchr(nodename, '/');
if (!type)
return -EINVAL;
type++;
typelen = strcspn(type, "/");
if (!typelen || type[typelen] != '/')
return -EINVAL;
devid = strrchr(nodename, '/') + 1;
err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
"frontend", NULL, &frontend,
NULL);
if (err)
return err;
if (strlen(frontend) == 0)
err = -ERANGE;
if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
err = -ENOENT;
kfree(frontend);
if (err)
return err;
if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s",
typelen, type, domid, devid) >= XEN_BUS_ID_SIZE)
return -ENOSPC;
return 0;
}
static int xenbus_uevent_backend(struct device *dev,
struct kobj_uevent_env *env)
{
struct xenbus_device *xdev;
struct xenbus_driver *drv;
struct xen_bus_type *bus;
DPRINTK("");
if (dev == NULL)
return -ENODEV;
xdev = to_xenbus_device(dev);
bus = container_of(xdev->dev.bus, struct xen_bus_type, bus);
if (xdev == NULL)
return -ENODEV;
/* stuff we want to pass to /sbin/hotplug */
if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype))
return -ENOMEM;
if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename))
return -ENOMEM;
if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root))
return -ENOMEM;
if (dev->driver) {
drv = to_xenbus_driver(dev->driver);
if (drv && drv->uevent)
return drv->uevent(xdev, env);
}
return 0;
}
/* backend/<typename>/<frontend-uuid>/<name> */
static int xenbus_probe_backend_unit(struct xen_bus_type *bus,
const char *dir,
const char *type,
const char *name)
{
char *nodename;
int err;
nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
if (!nodename)
return -ENOMEM;
DPRINTK("%s\n", nodename);
err = xenbus_probe_node(bus, type, nodename);
kfree(nodename);
return err;
}
/* backend/<typename>/<frontend-domid> */
static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
const char *domid)
{
char *nodename;
int err = 0;
char **dir;
unsigned int i, dir_n = 0;
DPRINTK("");
nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid);
if (!nodename)
return -ENOMEM;
dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
if (IS_ERR(dir)) {
kfree(nodename);
return PTR_ERR(dir);
}
for (i = 0; i < dir_n; i++) {
err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]);
if (err)
break;
}
kfree(dir);
kfree(nodename);
return err;
}
static void frontend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
xenbus_otherend_changed(watch, vec, len, 0);
}
static struct device_attribute xenbus_backend_dev_attrs[] = {
__ATTR_NULL
};
static struct xen_bus_type xenbus_backend = {
.root = "backend",
.levels = 3, /* backend/type/<frontend>/<id> */
.get_bus_id = backend_bus_id,
.probe = xenbus_probe_backend,
.otherend_changed = frontend_changed,
.bus = {
.name = "xen-backend",
.match = xenbus_match,
.uevent = xenbus_uevent_backend,
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_backend_dev_attrs,
},
};
static void backend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
DPRINTK("");
xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
}
static struct xenbus_watch be_watch = {
.node = "backend",
.callback = backend_changed,
};
static int read_frontend_details(struct xenbus_device *xendev)
{
return xenbus_read_otherend_details(xendev, "frontend-id", "frontend");
}
int xenbus_dev_is_online(struct xenbus_device *dev)
{
int rc, val;
rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
if (rc != 1)
val = 0; /* no online node present */
return val;
}
EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
int __xenbus_register_backend(struct xenbus_driver *drv,
struct module *owner, const char *mod_name)
{
drv->read_otherend_details = read_frontend_details;
return xenbus_register_driver_common(drv, &xenbus_backend,
owner, mod_name);
}
EXPORT_SYMBOL_GPL(__xenbus_register_backend);
static int backend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
void *data)
{
/* Enumerate devices in xenstore and watch for changes. */
xenbus_probe_devices(&xenbus_backend);
register_xenbus_watch(&be_watch);
return NOTIFY_DONE;
}
static int __init xenbus_probe_backend_init(void)
{
static struct notifier_block xenstore_notifier = {
.notifier_call = backend_probe_and_watch
};
int err;
DPRINTK("");
/* Register ourselves with the kernel bus subsystem */
err = bus_register(&xenbus_backend.bus);
if (err)
return err;
register_xenstore_notifier(&xenstore_notifier);
return 0;
}
subsys_initcall(xenbus_probe_backend_init);
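
For context, a backend driver would attach to this new bus through the __xenbus_register_backend() defined above; everything else in this sketch (the driver name, the ids table, the empty probe) is illustrative rather than taken from the tree:

static const struct xenbus_device_id demoback_ids[] = {
	{ "demo" },		/* matches backend/demo/... nodes */
	{ "" }
};

static int demoback_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	return 0;
}

static struct xenbus_driver demoback_driver = {
	.name = "demoback",
	.ids = demoback_ids,
	.probe = demoback_probe,
};

static int __init demoback_init(void)
{
	/* read_otherend_details is filled in by the backend core */
	return __xenbus_register_backend(&demoback_driver, THIS_MODULE,
					 KBUILD_MODNAME);
}
module_init(demoback_init);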


@@ -0,0 +1,294 @@
#define DPRINTK(fmt, args...) \
pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
__func__, __LINE__, ##args)
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include "xenbus_comms.h"
#include "xenbus_probe.h"
/* device/<type>/<id> => <type>-<id> */
static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
{
nodename = strchr(nodename, '/');
if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
return -EINVAL;
}
strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
if (!strchr(bus_id, '/')) {
printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
return -EINVAL;
}
*strchr(bus_id, '/') = '-';
return 0;
}
/* device/<typename>/<name> */
static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type,
const char *name)
{
char *nodename;
int err;
nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name);
if (!nodename)
return -ENOMEM;
DPRINTK("%s", nodename);
err = xenbus_probe_node(bus, type, nodename);
kfree(nodename);
return err;
}
static int xenbus_uevent_frontend(struct device *_dev,
struct kobj_uevent_env *env)
{
struct xenbus_device *dev = to_xenbus_device(_dev);
if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
return -ENOMEM;
return 0;
}
static void backend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
xenbus_otherend_changed(watch, vec, len, 1);
}
static struct device_attribute xenbus_frontend_dev_attrs[] = {
__ATTR_NULL
};
static struct xen_bus_type xenbus_frontend = {
.root = "device",
.levels = 2, /* device/type/<id> */
.get_bus_id = frontend_bus_id,
.probe = xenbus_probe_frontend,
.otherend_changed = backend_changed,
.bus = {
.name = "xen",
.match = xenbus_match,
.uevent = xenbus_uevent_frontend,
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_frontend_dev_attrs,
.suspend = xenbus_dev_suspend,
.resume = xenbus_dev_resume,
},
};
static void frontend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
DPRINTK("");
xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
}
/* We watch for devices appearing and vanishing. */
static struct xenbus_watch fe_watch = {
.node = "device",
.callback = frontend_changed,
};
static int read_backend_details(struct xenbus_device *xendev)
{
return xenbus_read_otherend_details(xendev, "backend-id", "backend");
}
static int is_device_connecting(struct device *dev, void *data)
{
struct xenbus_device *xendev = to_xenbus_device(dev);
struct device_driver *drv = data;
struct xenbus_driver *xendrv;
/*
* A device with no driver will never connect. We care only about
* devices which should currently be in the process of connecting.
*/
if (!dev->driver)
return 0;
/* Is this search limited to a particular driver? */
if (drv && (dev->driver != drv))
return 0;
xendrv = to_xenbus_driver(dev->driver);
return (xendev->state < XenbusStateConnected ||
(xendev->state == XenbusStateConnected &&
xendrv->is_ready && !xendrv->is_ready(xendev)));
}
static int exists_connecting_device(struct device_driver *drv)
{
return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
is_device_connecting);
}
static int print_device_status(struct device *dev, void *data)
{
struct xenbus_device *xendev = to_xenbus_device(dev);
struct device_driver *drv = data;
/* Is this operation limited to a particular driver? */
if (drv && (dev->driver != drv))
return 0;
if (!dev->driver) {
/* Information only: is this too noisy? */
printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
xendev->nodename);
} else if (xendev->state < XenbusStateConnected) {
enum xenbus_state rstate = XenbusStateUnknown;
if (xendev->otherend)
rstate = xenbus_read_driver_state(xendev->otherend);
printk(KERN_WARNING "XENBUS: Timeout connecting "
"to device: %s (local state %d, remote state %d)\n",
xendev->nodename, xendev->state, rstate);
}
return 0;
}
/* We only wait for device setup after most initcalls have run. */
static int ready_to_wait_for_devices;
/*
* On a 5-minute timeout, wait for all devices currently configured. We need
* to do this to guarantee that the filesystems and / or network devices
* needed for boot are available, before we can allow the boot to proceed.
*
* This needs to be on a late_initcall, to happen after the frontend device
* drivers have been initialised, but before the root fs is mounted.
*
* A possible improvement here would be to have the tools add a per-device
* flag to the store entry, indicating whether it is needed at boot time.
* This would allow people who knew what they were doing to accelerate their
* boot slightly, but of course needs tools or manual intervention to set up
* those flags correctly.
*/
static void wait_for_devices(struct xenbus_driver *xendrv)
{
unsigned long start = jiffies;
struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
unsigned int seconds_waited = 0;
if (!ready_to_wait_for_devices || !xen_domain())
return;
while (exists_connecting_device(drv)) {
if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
if (!seconds_waited)
printk(KERN_WARNING "XENBUS: Waiting for "
"devices to initialise: ");
seconds_waited += 5;
printk("%us...", 300 - seconds_waited);
if (seconds_waited == 300)
break;
}
schedule_timeout_interruptible(HZ/10);
}
if (seconds_waited)
printk("\n");
bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
print_device_status);
}
int __xenbus_register_frontend(struct xenbus_driver *drv,
struct module *owner, const char *mod_name)
{
int ret;
drv->read_otherend_details = read_backend_details;
ret = xenbus_register_driver_common(drv, &xenbus_frontend,
owner, mod_name);
if (ret)
return ret;
/* If this driver is loaded as a module wait for devices to attach. */
wait_for_devices(drv);
return 0;
}
EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
static int frontend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
void *data)
{
/* Enumerate devices in xenstore and watch for changes. */
xenbus_probe_devices(&xenbus_frontend);
register_xenbus_watch(&fe_watch);
return NOTIFY_DONE;
}
static int __init xenbus_probe_frontend_init(void)
{
static struct notifier_block xenstore_notifier = {
.notifier_call = frontend_probe_and_watch
};
int err;
DPRINTK("");
/* Register ourselves with the kernel bus subsystem */
err = bus_register(&xenbus_frontend.bus);
if (err)
return err;
register_xenstore_notifier(&xenstore_notifier);
return 0;
}
subsys_initcall(xenbus_probe_frontend_init);
#ifndef MODULE
static int __init boot_wait_for_devices(void)
{
if (xen_hvm_domain() && !xen_platform_pci_unplug)
return -ENODEV;
ready_to_wait_for_devices = 1;
wait_for_devices(NULL);
return 0;
}
late_initcall(boot_wait_for_devices);
#endif
MODULE_LICENSE("GPL");


@@ -6,7 +6,7 @@ ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
 	     index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
 	     unistr.o upcase.o
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.29\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.30\"
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG


@@ -1,7 +1,7 @@
 /*
  * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -1380,15 +1380,14 @@ static inline void ntfs_set_next_iovec(const struct iovec **iovp,
  * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
  * single-segment behaviour.
  *
- * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
- * when atomic and when not atomic.  This is ok because
- * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
- * and it is ok to call this when non-atomic.
- * Infact, the only difference between __copy_from_user_inatomic() and
+ * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
+ * atomic and when not atomic.  This is ok because it calls
+ * __copy_from_user_inatomic() and it is ok to call this when non-atomic.  In
+ * fact, the only difference between __copy_from_user_inatomic() and
  * __copy_from_user() is that the latter calls might_sleep() and the former
- * should not zero the tail of the buffer on error.  And on many
- * architectures __copy_from_user_inatomic() is just defined to
- * __copy_from_user() so it makes no difference at all on those architectures.
+ * should not zero the tail of the buffer on error.  And on many architectures
+ * __copy_from_user_inatomic() is just defined to __copy_from_user() so it
+ * makes no difference at all on those architectures.
  */
 static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
 		unsigned nr_pages, unsigned ofs, const struct iovec **iov,
@@ -1409,28 +1408,28 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
 		if (unlikely(copied != len)) {
 			/* Do it the slow way. */
 			addr = kmap(*pages);
-			copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
-					*iov, *iov_ofs, len);
-			/*
-			 * Zero the rest of the target like __copy_from_user().
-			 */
-			memset(addr + ofs + copied, 0, len - copied);
-			kunmap(*pages);
+			copied = __ntfs_copy_from_user_iovec_inatomic(addr +
+					ofs, *iov, *iov_ofs, len);
 			if (unlikely(copied != len))
 				goto err_out;
+			kunmap(*pages);
 		}
 		total += len;
+		ntfs_set_next_iovec(iov, iov_ofs, len);
 		bytes -= len;
 		if (!bytes)
 			break;
-		ntfs_set_next_iovec(iov, iov_ofs, len);
 		ofs = 0;
 	} while (++pages < last_page);
 out:
 	return total;
 err_out:
-	total += copied;
+	BUG_ON(copied > len);
 	/* Zero the rest of the target like __copy_from_user(). */
+	memset(addr + ofs + copied, 0, len - copied);
+	kunmap(*pages);
+	total += copied;
+	ntfs_set_next_iovec(iov, iov_ofs, copied);
 	while (++pages < last_page) {
 		bytes -= len;
 		if (!bytes)
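
The hunk above is the heart of the 2.1.30 writev() fix: ntfs_set_next_iovec()
is now called for every chunk copied, before the early break, where previously
the final chunk of each call left the iovec unadvanced, so the next call
started over at the first segment. A minimal user-space sketch of the
corrected pattern (a hypothetical helper, not the kernel code):

	#include <stddef.h>
	#include <string.h>
	#include <sys/uio.h>

	/*
	 * Sketch: copy "bytes" bytes from an iovec array into dst, advancing
	 * the current segment after every chunk consumed.  The caller must
	 * ensure the segments hold at least "bytes" bytes in total.
	 */
	static size_t copy_from_iovec(char *dst, size_t bytes,
			const struct iovec **iov, size_t *iov_ofs)
	{
		size_t total = 0;

		while (bytes) {
			size_t len = (*iov)->iov_len - *iov_ofs;

			if (len > bytes)
				len = bytes;
			memcpy(dst + total,
					(char *)(*iov)->iov_base + *iov_ofs, len);
			total += len;
			bytes -= len;
			/* Advance before any early exit, as the fix does. */
			*iov_ofs += len;
			if (*iov_ofs >= (*iov)->iov_len) {
				(*iov)++;
				*iov_ofs = 0;
			}
		}
		return total;
	}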

View File

@@ -1,7 +1,7 @@
 /*
  * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
  * Copyright (c) 2001,2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -3193,8 +3193,8 @@ static void __exit exit_ntfs_fs(void)
 	ntfs_sysctl(0);
 }
 
-MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>");
-MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2007 Anton Altaparmakov");
+MODULE_AUTHOR("Anton Altaparmakov <anton@tuxera.com>");
+MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.");
 MODULE_VERSION(NTFS_VERSION);
 MODULE_LICENSE("GPL");
 #ifdef DEBUG

View File

@@ -94,7 +94,7 @@ struct xenbus_driver {
 	int (*remove)(struct xenbus_device *dev);
 	int (*suspend)(struct xenbus_device *dev, pm_message_t state);
 	int (*resume)(struct xenbus_device *dev);
-	int (*uevent)(struct xenbus_device *, char **, int, char *, int);
+	int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
 	struct device_driver driver;
 	int (*read_otherend_details)(struct xenbus_device *dev);
 	int (*is_ready)(struct xenbus_device *dev);
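
The signature change above replaces the raw buffer, count and length arguments
with the kernel's kobj_uevent_env, so a handler appends variables through
add_uevent_var() and lets the core manage the buffer. A minimal sketch against
the new prototype (driver and variable names are made up, not from this patch):

	#include <linux/kobject.h>
	#include <xen/xenbus.h>

	static int examplefront_uevent(struct xenbus_device *xdev,
				       struct kobj_uevent_env *env)
	{
		/* add_uevent_var() returns 0 on success, -ENOMEM when full. */
		return add_uevent_var(env, "XENBUS_NODE=%s", xdev->nodename);
	}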

View File

@@ -0,0 +1,8 @@
turbostat : turbostat.c

clean :
	rm -f turbostat

install :
	install turbostat /usr/bin/turbostat
	install turbostat.8 /usr/share/man/man8

View File

@@ -0,0 +1,172 @@
.TH TURBOSTAT 8
.SH NAME
turbostat \- Report processor frequency and idle statistics
.SH SYNOPSIS
.ft B
.B turbostat
.RB [ "\-v" ]
.RB [ "\-M MSR#" ]
.RB command
.br
.B turbostat
.RB [ "\-v" ]
.RB [ "\-M MSR#" ]
.RB [ "\-i interval_sec" ]
.SH DESCRIPTION
\fBturbostat \fP reports processor topology, frequency
and idle power state statistics on modern X86 processors.
Either \fBcommand\fP is forked and statistics are printed
upon its completion, or statistics are printed periodically.
\fBturbostat \fP
requires that the processor
supports an "invariant" TSC, plus the APERF and MPERF MSRs.
\fBturbostat \fP will report idle cpu power state residency
on processors that additionally support C-state residency counters.
.SS Options
The \fB-v\fP option increases verbosity.
.PP
The \fB-M MSR#\fP option dumps the specified MSR,
in addition to the usual frequency and idle statistics.
.PP
The \fB-i interval_sec\fP option prints statistics every \fIinterval_sec\fP seconds.
The default is 5 seconds.
.PP
The \fBcommand\fP parameter forks \fBcommand\fP and upon its exit,
displays the statistics gathered since it was forked.
.PP
.SH FIELD DESCRIPTIONS
.nf
\fBpkg\fP processor package number.
\fBcore\fP processor core number.
\fBCPU\fP Linux CPU (logical processor) number.
\fB%c0\fP percent of the interval that the CPU retired instructions.
\fBGHz\fP average clock rate while the CPU was in c0 state.
\fBTSC\fP average GHz that the TSC ran during the entire interval.
\fB%c1, %c3, %c6\fP show the percentage residency in hardware core idle states.
\fB%pc3, %pc6\fP percentage residency in hardware package idle states.
.fi
.PP
.SH EXAMPLE
Without any parameters, turbostat prints out counters every 5 seconds.
(override interval with "-i sec" option, or specify a command
for turbostat to fork).
The first row of statistics reflects the average for the entire system.
Subsequent rows show per-CPU statistics.
.nf
[root@x980]# ./turbostat
core CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
0.04 1.62 3.38 0.11 0.00 99.85 0.00 95.07
0 0 0.04 1.62 3.38 0.06 0.00 99.90 0.00 95.07
0 6 0.02 1.62 3.38 0.08 0.00 99.90 0.00 95.07
1 2 0.10 1.62 3.38 0.29 0.00 99.61 0.00 95.07
1 8 0.11 1.62 3.38 0.28 0.00 99.61 0.00 95.07
2 4 0.01 1.62 3.38 0.01 0.00 99.98 0.00 95.07
2 10 0.01 1.61 3.38 0.02 0.00 99.98 0.00 95.07
8 1 0.07 1.62 3.38 0.15 0.00 99.78 0.00 95.07
8 7 0.03 1.62 3.38 0.19 0.00 99.78 0.00 95.07
9 3 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07
9 9 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07
10 5 0.01 1.62 3.38 0.13 0.00 99.86 0.00 95.07
10 11 0.08 1.62 3.38 0.05 0.00 99.86 0.00 95.07
.fi
.SH VERBOSE EXAMPLE
The "-v" option adds verbosity to the output:
.nf
GenuineIntel 11 CPUID levels; family:model:stepping 0x6:2c:2 (6:44:2)
12 * 133 = 1600 MHz max efficiency
25 * 133 = 3333 MHz TSC frequency
26 * 133 = 3467 MHz max turbo 4 active cores
26 * 133 = 3467 MHz max turbo 3 active cores
27 * 133 = 3600 MHz max turbo 2 active cores
27 * 133 = 3600 MHz max turbo 1 active cores
.fi
The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
available at the minimum package voltage. The \fBTSC frequency\fP is the nominal
maximum frequency of the processor if turbo-mode were not available. This frequency
should be sustainable on all CPUs indefinitely, given nominal power and cooling.
The remaining rows show what maximum turbo frequency is possible
depending on the number of idle cores. Note that this information is
not available on all processors.
.SH FORK EXAMPLE
If turbostat is invoked with a command, it will fork that command
and output the statistics gathered when the command exits.
e.g. here a cycle soaker is run on 1 CPU (see %c0) for a few seconds
until ^C while the other CPUs are mostly idle:
.nf
[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
^Ccore CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
8.49 3.63 3.38 16.23 0.66 74.63 0.00 0.00
0 0 1.22 3.62 3.38 32.18 0.00 66.60 0.00 0.00
0 6 0.40 3.61 3.38 33.00 0.00 66.60 0.00 0.00
1 2 0.11 3.14 3.38 0.19 3.95 95.75 0.00 0.00
1 8 0.05 2.88 3.38 0.25 3.95 95.75 0.00 0.00
2 4 0.00 3.13 3.38 0.02 0.00 99.98 0.00 0.00
2 10 0.00 3.09 3.38 0.02 0.00 99.98 0.00 0.00
8 1 0.04 3.50 3.38 14.43 0.00 85.54 0.00 0.00
8 7 0.03 2.98 3.38 14.43 0.00 85.54 0.00 0.00
9 3 0.00 3.16 3.38 100.00 0.00 0.00 0.00 0.00
9 9 99.93 3.63 3.38 0.06 0.00 0.00 0.00 0.00
10 5 0.01 2.82 3.38 0.08 0.00 99.91 0.00 0.00
10 11 0.02 3.36 3.38 0.06 0.00 99.91 0.00 0.00
6.950866 sec
.fi
Above, the cycle soaker drives cpu9 up to the 3.6 GHz turbo limit
while the other processors are generally in various states of idle.
Note that cpu3 is an HT sibling sharing core9
with cpu9, and thus it is unable to get to an idle state
deeper than c1 while cpu9 is busy.
Note that turbostat reports an average GHz of 3.63, while
the arithmetic average of the GHz column above is 3.24.
This is a weighted average, where the weight is %c0, i.e. it is the total number
of un-halted cycles elapsed per unit time divided by the number of CPUs.
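.PP
To make the weighting concrete, here is an illustrative C sketch of the
summary-row calculation (not turbostat source; per-CPU samples are assumed
to be gathered already):
.nf
	double weighted_ghz(int ncpus, const double *ghz, const double *c0)
	{
		double cycles = 0.0, busy = 0.0;
		int i;

		for (i = 0; i < ncpus; i++) {
			cycles += ghz[i] * c0[i];	/* un-halted cycles */
			busy += c0[i];			/* %c0 weight */
		}
		return cycles / busy;	/* caller ensures busy != 0 */
	}
.fi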
.SH NOTES
.B "turbostat "
must be run as root.
.B "turbostat "
reads hardware counters, but doesn't write them.
So it will not interfere with the OS or other programs, including
multiple invocations of itself.
\fBturbostat \fP
may work poorly on Linux-2.6.20 through 2.6.29,
as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF
in those kernels.
The APERF, MPERF MSRs are defined to count non-halted cycles.
Although it is not guaranteed by the architecture, turbostat assumes
that they count at TSC rate, which is true on all processors tested to date.
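.PP
Concretely, over one measurement interval the key statistics reduce to ratios
of counter deltas, a sketch under the TSC-rate assumption above (illustrative,
not turbostat source):
.nf
	%c0 = 100.0 * delta_MPERF / delta_TSC;
	GHz = TSC_GHz * delta_APERF / delta_MPERF;
.fi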
.SH REFERENCES
"Intel® Turbo Boost Technology
in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
http://download.intel.com/design/processor/applnots/320354.pdf
"Intel® 64 and IA-32 Architectures Software Developer's Manual
Volume 3B: System Programming Guide"
http://www.intel.com/products/processor/manuals/
.SH FILES
.ta
.nf
/dev/cpu/*/msr
.fi
.SH "SEE ALSO"
msr(4), vmstat(8)
.PP
.SH AUTHORS
.nf
Written by Len Brown <len.brown@intel.com>

File diff suppressed because it is too large

View File

@@ -0,0 +1,8 @@
x86_energy_perf_policy : x86_energy_perf_policy.c

clean :
	rm -f x86_energy_perf_policy

install :
	install x86_energy_perf_policy /usr/bin/
	install x86_energy_perf_policy.8 /usr/share/man/man8/

View File

@@ -0,0 +1,104 @@
.\" This page Copyright (C) 2010 Len Brown <len.brown@intel.com>
.\" Distributed under the GPL, Copyleft 1994.
.TH X86_ENERGY_PERF_POLICY 8
.SH NAME
x86_energy_perf_policy \- read or write MSR_IA32_ENERGY_PERF_BIAS
.SH SYNOPSIS
.ft B
.B x86_energy_perf_policy
.RB [ "\-c cpu" ]
.RB [ "\-v" ]
.RB "\-r"
.br
.B x86_energy_perf_policy
.RB [ "\-c cpu" ]
.RB [ "\-v" ]
.RB 'performance'
.br
.B x86_energy_perf_policy
.RB [ "\-c cpu" ]
.RB [ "\-v" ]
.RB 'normal'
.br
.B x86_energy_perf_policy
.RB [ "\-c cpu" ]
.RB [ "\-v" ]
.RB 'powersave'
.br
.B x86_energy_perf_policy
.RB [ "\-c cpu" ]
.RB [ "\-v" ]
.RB n
.br
.SH DESCRIPTION
\fBx86_energy_perf_policy\fP
allows software to convey
its policy for the relative importance of performance
versus energy savings to the processor.
The processor uses this information in model-specific ways
when it must select trade-offs between performance and
energy efficiency.
This policy hint does not supersede Processor Performance states
(P-states) or CPU Idle power states (C-states), but allows
software to have influence where it would otherwise be unable
to express a preference.
For example, this setting may tell the hardware how
aggressively or conservatively to control frequency
in the "turbo range" above the explicitly OS-controlled
P-state frequency range. It may also tell the hardware
how aggressively it should enter the OS requested C-states.
Support for this feature is indicated by CPUID.06H.ECX.bit3
per the Intel Architectures Software Developer's Manual.
.SS Options
\fB-c\fP limits operation to a single CPU.
The default is to operate on all CPUs.
Note that MSR_IA32_ENERGY_PERF_BIAS is defined per
logical processor, but that the initial implementations
of the MSR were shared among all processors in each package.
.PP
\fB-v\fP increases verbosity. By default
x86_energy_perf_policy is silent.
.PP
\fB-r\fP is for "read-only" mode - the unchanged state
is read and displayed.
.PP
.I performance
Set a policy where performance is paramount.
The processor will be unwilling to sacrifice any performance
for the sake of energy saving. This is the hardware default.
.PP
.I normal
Set a policy with a normal balance between performance and energy efficiency.
The processor will tolerate minor performance compromise
for potentially significant energy savings.
This is a reasonable default for most desktops and servers.
.PP
.I powersave
Set a policy where the processor can accept
a measurable performance hit to maximize energy efficiency.
.PP
.I n
Set MSR_IA32_ENERGY_PERF_BIAS to the specified number.
The range of valid numbers is 0-15, where 0 is maximum
performance and 15 is maximum energy efficiency.
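.PP
For reference, the named policies accepted above map to the following bias
values in the companion utility's source (see the BIAS_* defines in
x86_energy_perf_policy.c later in this commit):
.nf
	performance	0
	normal		6
	powersave	15
.fi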
.SH NOTES
.B "x86_energy_perf_policy "
runs only as root.
.SH FILES
.ta
.nf
/dev/cpu/*/msr
.fi
.SH "SEE ALSO"
msr(4)
.PP
.SH AUTHORS
.nf
Written by Len Brown <len.brown@intel.com>

View File

@@ -0,0 +1,325 @@
/*
* x86_energy_perf_policy -- set the energy versus performance
* policy preference bias on recent X86 processors.
*/
/*
* Copyright (c) 2010, Intel Corporation.
* Len Brown <len.brown@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/time.h>
#include <stdlib.h>
#include <string.h>
unsigned int verbose; /* set with -v */
unsigned int read_only; /* set with -r */
char *progname;
unsigned long long new_bias;
int cpu = -1;
/*
* Usage:
*
* -c cpu: limit action to a single CPU (default is all CPUs)
* -v: verbose output (can invoke more than once)
* -r: read-only, don't change any settings
*
* performance
* Performance is paramount.
* Unwilling to sacrifice any performance
* for the sake of energy saving. (hardware default)
*
* normal
* Can tolerate minor performance compromise
* for potentially significant energy savings.
* (reasonable default for most desktops and servers)
*
* powersave
* Can tolerate significant performance hit
* to maximize energy savings.
*
* n
* a numerical value to write to the underlying MSR.
*/
void usage(void)
{
printf("%s: [-c cpu] [-v] "
"(-r | 'performance' | 'normal' | 'powersave' | n)\n",
progname);
exit(1);
}
#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
#define BIAS_PERFORMANCE 0
#define BIAS_BALANCE 6
#define BIAS_POWERSAVE 15
void cmdline(int argc, char **argv)
{
int opt;
progname = argv[0];
while ((opt = getopt(argc, argv, "+rvc:")) != -1) {
switch (opt) {
case 'c':
cpu = atoi(optarg);
break;
case 'r':
read_only = 1;
break;
case 'v':
verbose++;
break;
default:
usage();
}
}
/* if -r, then should be no additional optind */
if (read_only && (argc > optind))
usage();
/*
* if no -r , then must be one additional optind
*/
if (!read_only) {
if (argc != optind + 1) {
printf("must supply -r or policy param\n");
usage();
}
if (!strcmp("performance", argv[optind])) {
new_bias = BIAS_PERFORMANCE;
} else if (!strcmp("normal", argv[optind])) {
new_bias = BIAS_BALANCE;
} else if (!strcmp("powersave", argv[optind])) {
new_bias = BIAS_POWERSAVE;
} else {
char *endptr;
new_bias = strtoull(argv[optind], &endptr, 0);
if (endptr == argv[optind] ||
new_bias > BIAS_POWERSAVE) {
fprintf(stderr, "invalid value: %s\n",
argv[optind]);
usage();
}
}
}
}
/*
* validate_cpuid()
* returns on success, quietly exits on failure (make verbose with -v)
*/
void validate_cpuid(void)
{
unsigned int eax, ebx, ecx, edx, max_level;
char brand[16];
unsigned int fms, family, model, stepping;
eax = ebx = ecx = edx = 0;
asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx),
"=d" (edx) : "a" (0));
if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
if (verbose)
fprintf(stderr, "%.4s%.4s%.4s != GenuineIntel",
(char *)&ebx, (char *)&edx, (char *)&ecx);
exit(1);
}
asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
family = (fms >> 8) & 0xf;
model = (fms >> 4) & 0xf;
stepping = fms & 0xf;
if (family == 6 || family == 0xf)
model += ((fms >> 16) & 0xf) << 4;
if (verbose > 1)
printf("CPUID %s %d levels family:model:stepping "
"0x%x:%x:%x (%d:%d:%d)\n", brand, max_level,
family, model, stepping, family, model, stepping);
if (!(edx & (1 << 5))) {
if (verbose)
printf("CPUID: no MSR\n");
exit(1);
}
/*
* Support for MSR_IA32_ENERGY_PERF_BIAS
* is indicated by CPUID.06H.ECX.bit3
*/
asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (6));
if (verbose)
printf("CPUID.06H.ECX: 0x%x\n", ecx);
if (!(ecx & (1 << 3))) {
if (verbose)
printf("CPUID: No MSR_IA32_ENERGY_PERF_BIAS\n");
exit(1);
}
return; /* success */
}
unsigned long long get_msr(int cpu, int offset)
{
unsigned long long msr;
char msr_path[32];
int retval;
int fd;
sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
fd = open(msr_path, O_RDONLY);
if (fd < 0) {
printf("Try \"# modprobe msr\"\n");
perror(msr_path);
exit(1);
}
retval = pread(fd, &msr, sizeof msr, offset);
if (retval != sizeof msr) {
printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
exit(-2);
}
close(fd);
return msr;
}
unsigned long long put_msr(int cpu, unsigned long long new_msr, int offset)
{
unsigned long long old_msr;
char msr_path[32];
int retval;
int fd;
sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
fd = open(msr_path, O_RDWR);
if (fd < 0) {
perror(msr_path);
exit(1);
}
retval = pread(fd, &old_msr, sizeof old_msr, offset);
if (retval != sizeof old_msr) {
perror("pwrite");
printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
exit(-2);
}
retval = pwrite(fd, &new_msr, sizeof new_msr, offset);
if (retval != sizeof new_msr) {
perror("pwrite");
printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval);
exit(-2);
}
close(fd);
return old_msr;
}
void print_msr(int cpu)
{
printf("cpu%d: 0x%016llx\n",
cpu, get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS));
}
void update_msr(int cpu)
{
unsigned long long previous_msr;
previous_msr = put_msr(cpu, new_bias, MSR_IA32_ENERGY_PERF_BIAS);
if (verbose)
printf("cpu%d msr0x%x 0x%016llx -> 0x%016llx\n",
cpu, MSR_IA32_ENERGY_PERF_BIAS, previous_msr, new_bias);
return;
}
char *proc_stat = "/proc/stat";
/*
* run func() on every cpu in /dev/cpu
*/
void for_every_cpu(void (func)(int))
{
FILE *fp;
int retval;
fp = fopen(proc_stat, "r");
if (fp == NULL) {
perror(proc_stat);
exit(1);
}
retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
if (retval != 0) {
perror("/proc/stat format");
exit(1);
}
while (1) {
int cpu;
retval = fscanf(fp,
"cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n",
&cpu);
if (retval != 1)
return;
func(cpu);
}
fclose(fp);
}
int main(int argc, char **argv)
{
cmdline(argc, argv);
if (verbose > 1)
printf("x86_energy_perf_policy Nov 24, 2010"
" - Len Brown <lenb@kernel.org>\n");
if (verbose > 1 && !read_only)
printf("new_bias %lld\n", new_bias);
validate_cpuid();
if (cpu != -1) {
if (read_only)
print_msr(cpu);
else
update_msr(cpu);
} else {
if (read_only)
for_every_cpu(print_msr);
else
for_every_cpu(update_msr);
}
return 0;
}

View File

@@ -0,0 +1,30 @@
#!/usr/bin/perl
# Cross-check ktest.pl against sample.conf: report any option referenced
# in one file but missing from the other.

open (IN,"ktest.pl");
while (<IN>) {
if (/\$opt\{"?([A-Z].*?)(\[.*\])?"?\}/ ||
/set_test_option\("(.*?)"/) {
$opt{$1} = 1;
}
}
close IN;
open (IN, "sample.conf");
while (<IN>) {
if (/^\s*#?\s*(\S+)\s*=/) {
$samp{$1} = 1;
}
}
close IN;
foreach $opt (keys %opt) {
if (!defined($samp{$opt})) {
print "opt = $opt\n";
}
}
foreach $samp (keys %samp) {
if (!defined($opt{$samp})) {
print "samp = $samp\n";
}
}

tools/testing/ktest/ktest.pl (new executable file, 2023 lines)

File diff suppressed because it is too large

View File

@@ -0,0 +1,622 @@
#
# Config file for ktest.pl
#
# Note, all paths must be absolute
#
# Options set in the beginning of the file are considered to be
# default options. These options can be overriden by test specific
# options, with the following exceptions:
#
# LOG_FILE
# CLEAR_LOG
# POWEROFF_ON_SUCCESS
# REBOOT_ON_SUCCESS
#
# Test specific options are set after the label:
#
# TEST_START
#
# The options after a TEST_START label are specific to that test.
# Each TEST_START label will set up a new test. If you want to
# perform a test more than once, you can add the ITERATE label
# to it followed by the number of times you want that test
# to iterate. If the ITERATE is left off, the test will only
# be performed once.
#
# TEST_START ITERATE 10
#
# You can skip a test by adding SKIP (before or after the ITERATE
# and number)
#
# TEST_START SKIP
#
# TEST_START SKIP ITERATE 10
#
# TEST_START ITERATE 10 SKIP
#
# The SKIP label causes the options and the test itself to be ignored.
# This is useful to set up several different tests in one config file, and
# only enabling the ones you want to use for a current test run.
#
# You can add default options anywhere in the file as well
# with the DEFAULTS tag. This allows you to have default options
# after the test options to keep the test options at the top
# of the file. You can even place the DEFAULTS tag between
# test cases (but not in the middle of a single test case)
#
# TEST_START
# MIN_CONFIG = /home/test/config-test1
#
# DEFAULTS
# MIN_CONFIG = /home/test/config-default
#
# TEST_START ITERATE 10
#
# The above will run the first test with MIN_CONFIG set to
# /home/test/config-test-1. Then 10 tests will be executed
# with MIN_CONFIG with /home/test/config-default.
#
# You can also disable defaults with the SKIP option
#
# DEFAULTS SKIP
# MIN_CONFIG = /home/test/config-use-sometimes
#
# DEFAULTS
# MIN_CONFIG = /home/test/config-most-times
#
# The above will ignore the first MIN_CONFIG. If you want to
# use the first MIN_CONFIG, remove the SKIP from the first
# DEFAULTS tag and add it to the second. Be careful, options
# may only be declared once per test or default. If you have
# the same option name under the same test or as default
# ktest will fail to execute, and no tests will run.
#
#### Mandatory Default Options ####
# These options must be in the default section, although most
# may be overridden by test options.
# The machine hostname that you will test
#MACHINE = target
# The box is expected to have ssh on normal bootup, provide the user
# (most likely root, since you need privileged operations)
#SSH_USER = root
# The directory that contains the Linux source code
#BUILD_DIR = /home/test/linux.git
# The directory that the objects will be built in
# (can not be same as BUILD_DIR)
#OUTPUT_DIR = /home/test/build/target
# The location of the compiled file to copy to the target
# (relative to OUTPUT_DIR)
#BUILD_TARGET = arch/x86/boot/bzImage
# The place to put your image on the test machine
#TARGET_IMAGE = /boot/vmlinuz-test
# A script or command to reboot the box
#
# Here is a digital loggers power switch example
#POWER_CYCLE = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin@power/outlet?5=CCL'
#
# Here is an example to reboot a virtual box on the current host
# with the name "Guest".
#POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
# The script or command that reads the console
#
# If you use ttywatch server, something like the following would work.
#CONSOLE = nc -d localhost 3001
#
# For a virtual machine with guest name "Guest".
#CONSOLE = virsh console Guest
# Required version ending to differentiate the test
# from other linux builds on the system.
#LOCALVERSION = -test
# The grub title name for the test kernel to boot
# (Only mandatory if REBOOT_TYPE = grub)
#
# Note, ktest.pl will not update the grub menu.lst, you need to
# manually add an option for the test. ktest.pl will search
# the grub menu.lst for this option to find what kernel to
# reboot into.
#
# For example, if in the /boot/grub/menu.lst the test kernel title has:
# title Test Kernel
# kernel vmlinuz-test
#GRUB_MENU = Test Kernel
# A script to reboot the target into the test kernel
# (Only mandatory if REBOOT_TYPE = script)
#REBOOT_SCRIPT =
#### Optional Config Options (all have defaults) ####
# Start a test setup. If you leave this off, all options
# will be default and the test will run once.
# This is a label and not really an option (it takes no value).
# You can append ITERATE and a number after it to iterate the
# test a number of times, or SKIP to ignore this test.
#
#TEST_START
#TEST_START ITERATE 5
#TEST_START SKIP
# Have the following options as default again. Used after tests
# have already been defined by TEST_START. Optionally, you can
# just define all default options before the first TEST_START
# and you do not need this option.
#
# This is a label and not really an option (it takes no value).
# You can append SKIP to this label and the options within this
# section will be ignored.
#
# DEFAULTS
# DEFAULTS SKIP
# The default test type (default test)
# The test types may be:
# build - only build the kernel, do nothing else
# boot - build and boot the kernel
# test - build, boot and if TEST is set, run the test script
# (If TEST is not set, it defaults back to boot)
# bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
# patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
#TEST_TYPE = test
# Test to run if there is a successful boot and TEST_TYPE is test.
# Must exit with 0 on success and non zero on error
# default (undefined)
#TEST = ssh user@machine /root/run_test
# The build type is any make config type or special command
# (default randconfig)
# nobuild - skip the clean and build step
# useconfig:/path/to/config - use the given config and run
# oldconfig on it.
# This option is ignored if TEST_TYPE is patchcheck or bisect
#BUILD_TYPE = randconfig
# The make command (default make)
# If you are building a 32bit x86 on a 64 bit host
#MAKE_CMD = CC=i386-gcc AS=i386-as make ARCH=i386
# Any build options for the make of the kernel (not for other makes, like configs)
# (default "")
#BUILD_OPTIONS = -j20
# If you need an initrd, you can add a script or code here to install
# it. The environment variable KERNEL_VERSION will be set to the
# kernel version that is used. Remember to add the initrd line
# to your grub menu.lst file.
#
# Here's a couple of examples to use:
#POST_INSTALL = ssh user@target /sbin/mkinitrd --allow-missing -f /boot/initramfs-test.img $KERNEL_VERSION
#
# or on some systems:
#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
# Way to reboot the box to the test kernel.
# Only valid options so far are "grub" and "script"
# (default grub)
# If you specify grub, it will assume grub version 1
# and will search in /boot/grub/menu.lst for the title $GRUB_MENU
# and select that target to reboot to the kernel. If this is not
# your setup, then specify "script" and have a command or script
# specified in REBOOT_SCRIPT to boot to the target.
#
# The entry in /boot/grub/menu.lst must be entered in manually.
# The test will not modify that file.
#REBOOT_TYPE = grub
# The min config that is needed to build for the machine
# A nice way to create this is with the following:
#
# $ ssh target
# $ lsmod > mymods
# $ scp mymods host:/tmp
# $ exit
# $ cd linux.git
# $ rm .config
# $ make LSMOD=mymods localyesconfig
# $ grep '^CONFIG' .config > /home/test/config-min
#
# If you want even less configs:
#
# log in directly to target (do not ssh)
#
# $ su
# # lsmod | cut -d' ' -f1 | xargs rmmod
#
# repeat the above several times
#
# # lsmod > mymods
# # reboot
#
# May need to reboot to get your network back to copy the mymods
# to the host, and then remove the previous .config and run the
# localyesconfig again. The MIN_CONFIG generated like this will
# not guarantee network activity to the box so the TEST_TYPE of
# test may fail.
#
# You might also want to set:
# CONFIG_CMDLINE="<your options here>"
# randconfig may set the above and override your real command
# line options.
# (default undefined)
#MIN_CONFIG = /home/test/config-min
# Sometimes there's options that just break the boot and
# you do not care about. Here are a few:
# # CONFIG_STAGING is not set
# Staging drivers are horrible, and can break the build.
# # CONFIG_SCSI_DEBUG is not set
# SCSI_DEBUG may change your root partition
# # CONFIG_KGDB_SERIAL_CONSOLE is not set
# KGDB may cause oops waiting for a connection that's not there.
# This option points to the file containing config options that will be prepended
# to the MIN_CONFIG (or be the MIN_CONFIG if it is not set)
#
# Note, config options in MIN_CONFIG will override these options.
#
# (default undefined)
#ADD_CONFIG = /home/test/config-broken
# The location on the host where to write temp files
# (default /tmp/ktest)
#TMP_DIR = /tmp/ktest
# Optional log file to write the status (recommended)
# Note, this is a DEFAULT section only option.
# (default undefined)
#LOG_FILE = /home/test/logfiles/target.log
# Remove old logfile if it exists before starting all tests.
# Note, this is a DEFAULT section only option.
# (default 0)
#CLEAR_LOG = 0
# Line to define a successful boot up in console output.
# This is what the line contains, not the entire line. If you need
# the entire line to match, then use regular expression syntax like:
# (do not add any quotes around it)
#
# SUCCESS_LINE = ^MyBox Login:$
#
# (default "login:")
#SUCCESS_LINE = login:
# In case the console constantly fills the screen, having
# a specified time to stop the test after success is recommended.
# (in seconds)
# (default 10)
#STOP_AFTER_SUCCESS = 10
# In case the console constantly fills the screen, having
# a specified time to stop the test after failure is recommended.
# (in seconds)
# (default 60)
#STOP_AFTER_FAILURE = 60
# Stop testing if a build fails. If set, the script will end if
# a failure is detected, otherwise it will save off the .config,
# dmesg and bootlog in a directory called
# MACHINE-TEST_TYPE_BUILD_TYPE-fail-yyyymmddhhmmss
# if the STORE_FAILURES directory is set.
# (default 1)
# Note, even if this is set to zero, there are some errors that still
# stop the tests.
#DIE_ON_FAILURE = 1
# Directory to store failure directories on failure. If this is not
# set, DIE_ON_FAILURE=0 will not save off the .config, dmesg and
# bootlog. This option is ignored if DIE_ON_FAILURE is not set.
# (default undefined)
#STORE_FAILURES = /home/test/failures
# Build without doing a make mrproper, or removing .config
# (default 0)
#BUILD_NOCLEAN = 0
# As the test reads the console, after it hits the SUCCESS_LINE
# the time it waits for the monitor to settle down between reads
# can usually be lowered.
# (in seconds) (default 1)
#BOOTED_TIMEOUT = 1
# The timeout in seconds when we consider the box hung after
# the console stop producing output. Be sure to leave enough
# time here to get pass a reboot. Some machines may not produce
# any console output for a long time during a reboot. You do
# not want the test to fail just because the system was in
# the process of rebooting to the test kernel.
# (default 120)
#TIMEOUT = 120
# In between tests, a reboot of the box may occur, and this
# is the time to wait for the console after it stops producing
# output. Some machines may not produce a large lag on reboot
# so this should accommodate it.
# The difference between this and TIMEOUT, is that TIMEOUT happens
# when rebooting to the test kernel. This sleep time happens
# after a test has completed and we are about to start running
# another test. If a reboot to the reliable kernel happens,
# we wait SLEEP_TIME for the console to stop producing output
# before starting the next test.
# (default 60)
#SLEEP_TIME = 60
# The time in between bisects to sleep (in seconds)
# (default 60)
#BISECT_SLEEP_TIME = 60
# Reboot the target box on error (default 0)
#REBOOT_ON_ERROR = 0
# Power off the target on error (ignored if REBOOT_ON_ERROR is set)
# Note, this is a DEFAULT section only option.
# (default 0)
#POWEROFF_ON_ERROR = 0
# Power off the target after all tests have completed successfully
# Note, this is a DEFAULT section only option.
# (default 0)
#POWEROFF_ON_SUCCESS = 0
# Reboot the target after all test completed successfully (default 1)
# (ignored if POWEROFF_ON_SUCCESS is set)
#REBOOT_ON_SUCCESS = 1
# In case there are issues with rebooting, you can specify this
# to always powercycle after this amount of time after calling
# reboot.
# Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
# makes it powercycle immediately after rebooting. Do not define
# it if you do not want it.
# (default undefined)
#POWERCYCLE_AFTER_REBOOT = 5
# In case there are issues with halting, you can specify this
# to always poweroff after this amount of time after calling
# halt.
# Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just
# makes it poweroff immediately after halting. Do not define
# it if you do not want it.
# (default undefined)
#POWEROFF_AFTER_HALT = 20
# A script or command to power off the box (default undefined)
# Needed for POWEROFF_ON_ERROR and SUCCESS
#
# Example for digital loggers power switch:
#POWER_OFF = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin@power/outlet?5=OFF'
#
# Example for a virtual guest call "Guest".
#POWER_OFF = virsh destroy Guest
# The way to execute a command on the target
# (default ssh $SSH_USER@$MACHINE $SSH_COMMAND)
# The variables SSH_USER, MACHINE and SSH_COMMAND are defined
#SSH_EXEC = ssh $SSH_USER@$MACHINE $SSH_COMMAND
# The way to copy a file to the target
# (default scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE)
# The variables SSH_USER, MACHINE, SRC_FILE and DST_FILE are defined.
#SCP_TO_TARGET = scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE
# The nice way to reboot the target
# (default ssh $SSH_USER@$MACHINE reboot)
# The variables SSH_USER and MACHINE are defined.
#REBOOT = ssh $SSH_USER@$MACHINE reboot
#### Per test run options ####
# The following options are only allowed in TEST_START sections.
# They are ignored in the DEFAULTS sections.
#
# All of these are optional and undefined by default, although
# some of these options are required for TEST_TYPE of patchcheck
# and bisect.
#
#
# CHECKOUT = branch
#
# If the BUILD_DIR is a git repository, then you can set this option
# to checkout the given branch before running the TEST. If you
# specify this for the first run, that branch will be used for
# all subsequent tests until a new CHECKOUT is set.
#
#
#
# For TEST_TYPE = patchcheck
#
# This expects the BUILD_DIR to be a git repository, and
# will checkout the PATCHCHECK_START commit.
#
# The option BUILD_TYPE will be ignored.
#
# The MIN_CONFIG will be used for all builds of the patchcheck. The build type
# used for patchcheck is oldconfig.
#
# PATCHCHECK_START is required and is the first patch to
# test (the SHA1 of the commit). You may also specify anything
# that git checkout allows (branch name, tag, HEAD~3).
#
# PATCHCHECK_END is the last patch to check (default HEAD)
#
# PATCHCHECK_TYPE is required and is the type of test to run:
# build, boot, test.
#
# Note, the build test will look for warnings, if a warning occurred
# in a file that a commit touches, the build will fail.
#
# If BUILD_NOCLEAN is set, then make mrproper will not be run on
# any of the builds, just like all other TEST_TYPE tests. But
# what makes patchcheck different from the other tests, is if
# BUILD_NOCLEAN is not set, only the first and last patch run
# make mrproper. This helps speed up the test.
#
# Example:
# TEST_START
# TEST_TYPE = patchcheck
# CHECKOUT = mybranch
# PATCHCHECK_TYPE = boot
# PATCHCHECK_START = 747e94ae3d1b4c9bf5380e569f614eb9040b79e7
# PATCHCHECK_END = HEAD~2
#
#
#
# For TEST_TYPE = bisect
#
# You can specify a git bisect if the BUILD_DIR is a git repository.
# The MIN_CONFIG will be used for all builds of the bisect. The build type
# used for bisecting is oldconfig.
#
# The option BUILD_TYPE will be ignored.
#
# BISECT_TYPE is the type of test to perform:
# build - bad fails to build
# boot - bad builds but fails to boot
# test - bad boots but fails a test
#
# BISECT_GOOD is the commit (SHA1) to label as good (accepts all git good commit types)
# BISECT_BAD is the commit to label as bad (accepts all git bad commit types)
#
# The above three options are required for a bisect operation.
#
# BISECT_REPLAY = /path/to/replay/file (optional, default undefined)
#
# If an operation failed in the bisect that was not expected to
# fail. Then the test ends. The state of the BUILD_DIR will be
# left off at where the failure occurred. You can examine the
# reason for the failure, and perhaps even find a git commit
# that would work to continue with. You can run:
#
# git bisect log > /path/to/replay/file
#
# Then adding:
#
# BISECT_REPLAY= /path/to/replay/file
#
# And running the test again. The test will perform the initial
# git bisect start, git bisect good, and git bisect bad, and
# then it will run git bisect replay on this file, before
# continuing with the bisect.
#
# BISECT_START = commit (optional, default undefined)
#
# As with BISECT_REPLAY, if the test failed on a commit that
# just happens to have a bad commit in the middle of the bisect,
# and you need to skip it. If BISECT_START is defined, it
# will checkout that commit after doing the initial git bisect start,
# git bisect good, git bisect bad, and running the git bisect replay
# if the BISECT_REPLAY is set.
#
# BISECT_REVERSE = 1 (optional, default 0)
#
# In those strange instances where it was broken forever
# and you are trying to find where it started to work!
# Set BISECT_GOOD to the commit that was last known to fail
# Set BISECT_BAD to the commit that is known to start working.
# With BISECT_REVERSE = 1, the test will consider failures as
# good, and success as bad.
#
# BISECT_CHECK = 1 (optional, default 0)
#
# Just to be sure the good is good and bad is bad, setting
# BISECT_CHECK to 1 will start the bisect by first checking
# out BISECT_BAD and makes sure it fails, then it will check
# out BISECT_GOOD and makes sure it succeeds before starting
# the bisect (it works for BISECT_REVERSE too).
#
# You can limit the test to just check BISECT_GOOD or
# BISECT_BAD with BISECT_CHECK = good or
# BISECT_CHECK = bad, respectively.
#
# Example:
# TEST_START
# TEST_TYPE = bisect
# BISECT_GOOD = v2.6.36
# BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e
# BISECT_TYPE = build
# MIN_CONFIG = /home/test/config-bisect
#
#
#
# For TEST_TYPE = config_bisect
#
# In those cases where you have two different configs, one of them
# works, the other does not, and you do not know which config causes
# the problem.
# The TEST_TYPE config_bisect will bisect the bad config looking for
# what config causes the failure.
#
# The way it works is this:
#
# First it finds a config to work with. Since a different version, or
# MIN_CONFIG may cause different dependencies, it must run through this
# preparation.
#
# Overwrites any config set in the bad config with a config set in
# either the MIN_CONFIG or ADD_CONFIG. Thus, make sure these configs
# are minimal and do not disable configs you want to test:
# (i.e. # CONFIG_FOO is not set).
#
# An oldconfig is run on the bad config and any new config that
# appears will be added to the configs to test.
#
# Finally, it generates a config with the above result and runs it
# again through make oldconfig to produce a config that should be
# satisfied by kconfig.
#
# Then it starts the bisect.
#
# The configs to test are cut in half. If all the configs in this
# half depend on a config in the other half, then the other half
# is tested instead. If no configs are enabled by either half, then
# this means a circular dependency exists and the test fails.
#
# A config is created with the test half, and the bisect test is run.
#
# If the bisect succeeds, then all configs in the generated config
# are removed from the configs to test and added to the configs that
# will be enabled for all builds (they will be enabled, but not be part
# of the configs to examine).
#
# If the bisect fails, then all test configs that were not enabled by
# the config file are removed from the test. These configs will not
# be enabled in future tests. Since the current config failed, we consider
# this to be a subset of the config that we started with.
#
# When we are down to one config, it is considered the bad config.
#
# Note, the config chosen may not be the true bad config. Due to
# dependencies and selections of the kbuild system, multiple
# configs may be needed to cause a failure. If you disable the
# config that was found and restart the test, if the test fails
# again, it is recommended to rerun the config_bisect with a new
# bad config without the found config enabled.
#
# The option BUILD_TYPE will be ignored.
#
# CONFIG_BISECT_TYPE is the type of test to perform:
# build - bad fails to build
# boot - bad builds but fails to boot
# test - bad boots but fails a test
#
# CONFIG_BISECT is the config that failed to boot
#
# Example:
# TEST_START
# TEST_TYPE = config_bisect
# CONFIG_BISECT_TYPE = build
# CONFIG_BISECT = /home/test/config-bad
# MIN_CONFIG = /home/test/config-min
#