Merge branch 'master' into for-linus

commit 9c78965ca1

@@ -133,46 +133,6 @@ Description:
The symbolic link points to the PCI device sysfs entry of the
Physical Function this device associates with.

What: /sys/bus/pci/slots/...
Date: April 2005 (possibly older)
KernelVersion: 2.6.12 (possibly older)
Contact: linux-pci@vger.kernel.org
Description:
When the appropriate driver is loaded, it will create a
directory per claimed physical PCI slot in
/sys/bus/pci/slots/. The names of these directories are
specific to the driver, which in turn, are specific to the
platform, but in general, should match the label on the
machine's physical chassis.

The drivers that can create slot directories include the
PCI hotplug drivers, and as of 2.6.27, the pci_slot driver.

The slot directories contain, at a minimum, a file named
'address' which contains the PCI bus:device:function tuple.
Other files may appear as well, but are specific to the
driver.

What: /sys/bus/pci/slots/.../function[0-7]
Date: March 2010
KernelVersion: 2.6.35
Contact: linux-pci@vger.kernel.org
Description:
If PCI slot directories (as described above) are created,
and the physical slot is actually populated with a device,
symbolic links in the slot directory pointing to the
device's PCI functions are created as well.

What: /sys/bus/pci/devices/.../slot
Date: March 2010
KernelVersion: 2.6.35
Contact: linux-pci@vger.kernel.org
Description:
If PCI slot directories (as described above) are created,
a symbolic link pointing to the slot directory will be
created as well.

What: /sys/bus/pci/slots/.../module
Date: June 2009
Contact: linux-pci@vger.kernel.org

@@ -794,11 +794,6 @@ designed.

Roadmap:

2.6.35 Inclusion in mainline as an experimental mount option
	=> approximately 2-3 months to merge window
	=> needs to be in xfs-dev tree in 4-6 weeks
	=> code is nearing readiness for review

2.6.37 Remove experimental tag from mount option
	=> should be roughly 6 months after initial merge
	=> enough time to:

@@ -2978,7 +2978,6 @@ F: drivers/net/ixgb/
F: drivers/net/ixgbe/

INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
M: Zhu Yi <yi.zhu@intel.com>
M: Reinette Chatre <reinette.chatre@intel.com>
M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org

@@ -2988,7 +2987,6 @@ F: Documentation/networking/README.ipw2100
F: drivers/net/wireless/ipw2x00/ipw2100.*

INTEL PRO/WIRELESS 2915ABG NETWORK CONNECTION SUPPORT
M: Zhu Yi <yi.zhu@intel.com>
M: Reinette Chatre <reinette.chatre@intel.com>
M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org

@@ -3019,8 +3017,8 @@ F: drivers/net/wimax/i2400m/
F: include/linux/wimax/i2400m.h

INTEL WIRELESS WIFI LINK (iwlwifi)
M: Zhu Yi <yi.zhu@intel.com>
M: Reinette Chatre <reinette.chatre@intel.com>
M: Wey-Yi Guy <wey-yi.w.guy@intel.com>
M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org
W: http://intellinuxwireless.org

@@ -3030,7 +3028,6 @@ F: drivers/net/wireless/iwlwifi/

INTEL WIRELESS MULTICOMM 3200 WIFI (iwmc3200wifi)
M: Samuel Ortiz <samuel.ortiz@intel.com>
M: Zhu Yi <yi.zhu@intel.com>
M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org
S: Supported

Makefile

@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 35
EXTRAVERSION = -rc1
EXTRAVERSION = -rc3
NAME = Sheep on Meth

# *DOCUMENTATION*

@@ -1095,7 +1095,7 @@ all: modules
# using awk while concatenating to the final file.

PHONY += modules
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost

@@ -1117,7 +1117,7 @@ PHONY += modules_install
modules_install: _modinst_ _modinst_post

PHONY += _modinst_
_modinst_: modules.builtin
_modinst_:
@if [ -z "`$(DEPMOD) -V 2>/dev/null | grep module-init-tools`" ]; then \
echo "Warning: you may need to install module-init-tools"; \
echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\

@@ -951,8 +951,6 @@ static int sa1111_resume(struct platform_device *dev)
if (!save)
return 0;

spin_lock_irqsave(&sachip->lock, flags);

/*
* Ensure that the SA1111 is still here.
* FIXME: shouldn't do this here.

@@ -969,6 +967,13 @@ static int sa1111_resume(struct platform_device *dev)
* First of all, wake up the chip.
*/
sa1111_wake(sachip);

/*
* Only lock for write ops. Also, sa1111_wake must be called with
* released spinlock!
*/
spin_lock_irqsave(&sachip->lock, flags);

sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);

@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <mach/dma.h>

#define MSM_DMOV_CHANNEL_COUNT 16

@@ -32,7 +32,10 @@ void clk_disable(struct clk *clk)
}
EXPORT_SYMBOL(clk_disable);

/* We have a fixed clock alone, for now */
static struct clk clk_24 = {
.rate = 2400000,
};

static struct clk clk_48 = {
.rate = 48 * 1000 * 1000,
};

@@ -50,6 +53,8 @@ static struct clk clk_default;
}

static struct clk_lookup lookups[] = {
CLK(&clk_24, "mtu0"),
CLK(&clk_24, "mtu1"),
CLK(&clk_48, "uart0"),
CLK(&clk_48, "uart1"),
CLK(&clk_default, "gpio.0"),

@@ -59,10 +64,8 @@ static struct clk_lookup lookups[] = {
CLK(&clk_default, "rng"),
};

static int __init clk_init(void)
int __init clk_init(void)
{
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
return 0;
}

arch_initcall(clk_init);

@@ -11,3 +11,5 @@
struct clk {
unsigned long rate;
};

int __init clk_init(void);

@@ -31,6 +31,8 @@
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#include "clock.h"

#define __MEM_4K_RESOURCE(x) \
.res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}

@@ -143,6 +145,12 @@ void __init cpu8815_init_irq(void)
/* This modified VIC cell has two register blocks, at 0 and 0x20 */
vic_init(io_p2v(NOMADIK_IC_BASE + 0x00), IRQ_VIC_START + 0, ~0, 0);
vic_init(io_p2v(NOMADIK_IC_BASE + 0x20), IRQ_VIC_START + 32, ~0, 0);

/*
* Init clocks here so that they are available for system timer
* initialization.
*/
clk_init();
}

/*

@@ -263,11 +263,11 @@ const struct matrix_keymap_data palmtc_keymap_data = {
.keymap_size = ARRAY_SIZE(palmtc_matrix_keys),
};

const static unsigned int palmtc_keypad_row_gpios[] = {
static const unsigned int palmtc_keypad_row_gpios[] = {
0, 9, 10, 11
};

const static unsigned int palmtc_keypad_col_gpios[] = {
static const unsigned int palmtc_keypad_col_gpios[] = {
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 79, 80
};

@@ -818,6 +818,9 @@ static struct i2c_board_info akita_i2c_board_info[] = {
.type = "max7310",
.addr = 0x18,
.platform_data = &akita_ioexp,
}, {
.type = "wm8750",
.addr = 0x1b,
},
};

@@ -7,4 +7,5 @@ obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o devices-db5500.o
obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
obj-$(CONFIG_MACH_U8500_MOP) += board-mop500.o
obj-$(CONFIG_MACH_U5500) += board-u5500.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o localtimer.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o

@@ -16,6 +16,7 @@

#include <asm/clkdev.h>

#include <plat/mtu.h>
#include <mach/hardware.h>
#include "clock.h"

@@ -59,6 +60,9 @@
#define PRCM_DMACLK_MGT 0x074
#define PRCM_B2R2CLK_MGT 0x078
#define PRCM_TVCLK_MGT 0x07C
#define PRCM_TCR 0x1C8
#define PRCM_TCR_STOPPED (1 << 16)
#define PRCM_TCR_DOZE_MODE (1 << 17)
#define PRCM_UNIPROCLK_MGT 0x278
#define PRCM_SSPCLK_MGT 0x280
#define PRCM_RNGCLK_MGT 0x284

@@ -120,10 +124,95 @@ void clk_disable(struct clk *clk)
}
EXPORT_SYMBOL(clk_disable);

/*
* The MTU has a separate, rather complex muxing setup
* with alternative parents (peripheral cluster or
* ULP or fixed 32768 Hz) depending on settings
*/
static unsigned long clk_mtu_get_rate(struct clk *clk)
{
void __iomem *addr = __io_address(U8500_PRCMU_BASE)
+ PRCM_TCR;
u32 tcr = readl(addr);
int mtu = (int) clk->data;
/*
* One of these is selected eventually
* TODO: Replace the constant with a reference
* to the ULP source once this is modeled.
*/
unsigned long clk32k = 32768;
unsigned long mturate;
unsigned long retclk;

/* Get the rate from the parent as a default */
if (clk->parent_periph)
mturate = clk_get_rate(clk->parent_periph);
else if (clk->parent_cluster)
mturate = clk_get_rate(clk->parent_cluster);
else
/* We need to be connected SOMEWHERE */
BUG();

/*
* Are we in doze mode?
* In this mode the parent peripheral or the fixed 32768 Hz
* clock is fed into the block.
*/
if (!(tcr & PRCM_TCR_DOZE_MODE)) {
/*
* Here we're using the clock input from the APE ULP
* clock domain. But first: are the timers stopped?
*/
if (tcr & PRCM_TCR_STOPPED) {
clk32k = 0;
mturate = 0;
} else {
/* Else default mode: 0 and 2.4 MHz */
clk32k = 0;
if (cpu_is_u5500())
/* DB5500 divides by 8 */
mturate /= 8;
else if (cpu_is_u8500ed()) {
/*
* This clocking setting must not be used
* in the ED chip, it is simply not
* connected anywhere!
*/
mturate = 0;
BUG();
} else
/*
* In this mode the ulp38m4 clock is divided
* by a factor 16, on the DB8500 typically
* 38400000 / 16 ~ 2.4 MHz.
* TODO: Replace the constant with a reference
* to the ULP source once this is modeled.
*/
mturate = 38400000 / 16;
}
}

/* Return the clock selected for this MTU */
if (tcr & (1 << mtu))
retclk = clk32k;
else
retclk = mturate;

pr_info("MTU%d clock rate: %lu Hz\n", mtu, retclk);
return retclk;
}

unsigned long clk_get_rate(struct clk *clk)
{
unsigned long rate;

/*
* If there is a custom getrate callback for this clock,
* it will take precedence.
*/
if (clk->get_rate)
return clk->get_rate(clk);

if (clk->ops && clk->ops->get_rate)
return clk->ops->get_rate(clk);

@@ -341,8 +430,9 @@ static DEFINE_PRCC_CLK(5, usb_v1, 0, 0, NULL);

/* Peripheral Cluster #6 */

static DEFINE_PRCC_CLK(6, mtu1_v1, 8, -1, NULL);
static DEFINE_PRCC_CLK(6, mtu0_v1, 7, -1, NULL);
/* MTU ID in data */
static DEFINE_PRCC_CLK_CUSTOM(6, mtu1_v1, 8, -1, NULL, clk_mtu_get_rate, 1);
static DEFINE_PRCC_CLK_CUSTOM(6, mtu0_v1, 7, -1, NULL, clk_mtu_get_rate, 0);
static DEFINE_PRCC_CLK(6, cfgreg_v1, 6, 6, NULL);
static DEFINE_PRCC_CLK(6, dmc_ed, 6, 6, NULL);
static DEFINE_PRCC_CLK(6, hash1, 5, -1, NULL);

@@ -357,8 +447,9 @@ static DEFINE_PRCC_CLK(6, rng_v1, 0, 0, &clk_rngclk);
/* Peripheral Cluster #7 */

static DEFINE_PRCC_CLK(7, tzpc0_ed, 4, -1, NULL);
static DEFINE_PRCC_CLK(7, mtu1_ed, 3, -1, NULL);
static DEFINE_PRCC_CLK(7, mtu0_ed, 2, -1, NULL);
/* MTU ID in data */
static DEFINE_PRCC_CLK_CUSTOM(7, mtu1_ed, 3, -1, NULL, clk_mtu_get_rate, 1);
static DEFINE_PRCC_CLK_CUSTOM(7, mtu0_ed, 2, -1, NULL, clk_mtu_get_rate, 0);
static DEFINE_PRCC_CLK(7, wdg_ed, 1, -1, NULL);
static DEFINE_PRCC_CLK(7, cfgreg_ed, 0, -1, NULL);

@@ -503,15 +594,17 @@ static struct clk_lookup u8500_v1_clks[] = {
CLK(uiccclk, "uicc", NULL),
};

static int __init clk_init(void)
int __init clk_init(void)
{
if (cpu_is_u8500ed()) {
clk_prcmu_ops.enable = clk_prcmu_ed_enable;
clk_prcmu_ops.disable = clk_prcmu_ed_disable;
clk_per6clk.rate = 100000000;
} else if (cpu_is_u5500()) {
/* Clock tree for U5500 not implemented yet */
clk_prcc_ops.enable = clk_prcc_ops.disable = NULL;
clk_prcmu_ops.enable = clk_prcmu_ops.disable = NULL;
clk_per6clk.rate = 26000000;
}

clkdev_add_table(u8500_common_clks, ARRAY_SIZE(u8500_common_clks));

@@ -522,4 +615,3 @@ static int __init clk_init(void)

return 0;
}
arch_initcall(clk_init);

@@ -28,6 +28,9 @@ struct clkops {
* @ops: pointer to clkops struct used to control this clock
* @name: name, for debugging
* @enabled: refcount. positive if enabled, zero if disabled
* @get_rate: custom callback for getting the clock rate
* @data: custom per-clock data for example for the get_rate
* callback
* @rate: fixed rate for clocks which don't implement
* ops->getrate
* @prcmu_cg_off: address offset of the combined enable/disable register

@@ -67,6 +70,8 @@ struct clk {
const struct clkops *ops;
const char *name;
unsigned int enabled;
unsigned long (*get_rate)(struct clk *);
void *data;

unsigned long rate;
struct list_head list;

@@ -117,9 +122,26 @@ struct clk clk_##_name = { \
.parent_periph = _kernclk \
}

#define DEFINE_PRCC_CLK_CUSTOM(_pclust, _name, _bus_en, _kernel_en, _kernclk, _callback, _data) \
struct clk clk_##_name = { \
.name = #_name, \
.ops = &clk_prcc_ops, \
.cluster = _pclust, \
.prcc_bus = _bus_en, \
.prcc_kernel = _kernel_en, \
.parent_cluster = &clk_per##_pclust##clk, \
.parent_periph = _kernclk, \
.get_rate = _callback, \
.data = (void *) _data \
}

#define CLK(_clk, _devname, _conname) \
{ \
.clk = &clk_##_clk, \
.dev_id = _devname, \
.con_id = _conname, \
}

int __init clk_db8500_ed_fixup(void);
int __init clk_init(void);

@@ -62,6 +62,12 @@ void __init ux500_init_irq(void)
{
gic_dist_init(0, __io_address(UX500_GIC_DIST_BASE), 29);
gic_cpu_init(0, __io_address(UX500_GIC_CPU_BASE));

/*
* Init clocks here so that they are available for system timer
* initialization.
*/
clk_init();
}

#ifdef CONFIG_CACHE_L2X0

@@ -18,7 +18,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4-r9, lr} \n\
mov ip, %0 \n\
mov ip, %2 \n\
1: mov lr, r1 \n\
ldmia r1!, {r2 - r9} \n\
pld [lr, #32] \n\

@@ -64,7 +64,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\
ldmfd sp!, {r4-r9, pc}"
:
: "I" (PAGE_SIZE));
: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
}

void feroceon_copy_user_highpage(struct page *to, struct page *from,

@@ -27,7 +27,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, lr} @ 2\n\
mov r2, %0 @ 1\n\
mov r2, %2 @ 1\n\
ldmia r1!, {r3, r4, ip, lr} @ 4\n\
1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
stmia r0!, {r3, r4, ip, lr} @ 4\n\

@@ -44,7 +44,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\
ldmfd sp!, {r4, pc} @ 3"
:
: "I" (PAGE_SIZE / 64));
: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
}

void v4wb_copy_user_highpage(struct page *to, struct page *from,

@@ -25,7 +25,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, lr} @ 2\n\
mov r2, %0 @ 1\n\
mov r2, %2 @ 1\n\
ldmia r1!, {r3, r4, ip, lr} @ 4\n\
1: stmia r0!, {r3, r4, ip, lr} @ 4\n\
ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\

@@ -40,7 +40,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\
ldmfd sp!, {r4, pc} @ 3"
:
: "I" (PAGE_SIZE / 64));
: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
}

void v4wt_copy_user_highpage(struct page *to, struct page *from,

@@ -34,7 +34,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, r5, lr} \n\
mov lr, %0 \n\
mov lr, %2 \n\
\n\
pld [r1, #0] \n\
pld [r1, #32] \n\

@@ -67,7 +67,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
\n\
ldmfd sp!, {r4, r5, pc}"
:
: "I" (PAGE_SIZE / 64 - 1));
: "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
}

void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,

@@ -393,6 +393,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);

if (user_mode(regs))
goto bad_area;

index = pgd_index(addr);

/*

@@ -48,7 +48,16 @@ void *kmap_atomic(struct page *page, enum km_type type)

debug_kmap_atomic(type);

kmap = kmap_high_get(page);
#ifdef CONFIG_DEBUG_HIGHMEM
/*
* There is no cache coherency issue when non VIVT, so force the
* dedicated kmap usage for better debugging purposes in that case.
*/
if (!cache_is_vivt())
kmap = NULL;
else
#endif
kmap = kmap_high_get(page);
if (kmap)
return kmap;

@@ -678,10 +678,10 @@ void __init mem_init(void)
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
extern char *__tcm_start, *__tcm_end;
extern char __tcm_start, __tcm_end;

totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
__phys_to_pfn(__pa(__tcm_end)),
totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
__phys_to_pfn(__pa(&__tcm_end)),
"TCM link");
#endif

@@ -13,7 +13,9 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <asm/mach/time.h>

#include <plat/mtu.h>

@@ -124,13 +126,25 @@ static struct irqaction nmdk_timer_irq = {
void __init nmdk_timer_init(void)
{
unsigned long rate;
u32 cr = MTU_CRn_32BITS;;
struct clk *clk0;
struct clk *clk1;
u32 cr;

clk0 = clk_get_sys("mtu0", NULL);
BUG_ON(IS_ERR(clk0));

clk1 = clk_get_sys("mtu1", NULL);
BUG_ON(IS_ERR(clk1));

clk_enable(clk0);
clk_enable(clk1);

/*
* Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
* use a divide-by-16 counter if it's more than 16MHz
*/
rate = CLOCK_TICK_RATE;
cr = MTU_CRn_32BITS;;
rate = clk_get_rate(clk0);
if (rate > 16 << 20) {
rate /= 16;
cr |= MTU_CRn_PRESCALE_16;

@@ -153,6 +167,14 @@ void __init nmdk_timer_init(void)
nmdk_clksrc.name);

/* Timer 1 is used for events, fix according to rate */
cr = MTU_CRn_32BITS;
rate = clk_get_rate(clk1);
if (rate > 16 << 20) {
rate /= 16;
cr |= MTU_CRn_PRESCALE_16;
} else {
cr |= MTU_CRn_PRESCALE_1;
}
writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
nmdk_clkevt.mult = div_sc(rate, NSEC_PER_SEC, nmdk_clkevt.shift);
nmdk_clkevt.max_delta_ns =

@@ -277,7 +277,7 @@ ENTRY(vfp_put_double)
#ifdef CONFIG_VFPv3
@ d16 - d31 registers
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1: mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr
1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr
mov pc, lr
.org 1b + 8
.endr

@@ -1789,6 +1789,12 @@ void gdbstub(int sigval)
flush_cache = 1;
break;

/* pNN: Read value of reg N and return it */
case 'p':
/* return no value, indicating that we don't support
* this command and that gdb should use 'g' instead */
break;

/* PNN,=RRRRRRRR: Write value R to reg N return OK */
case 'P':
ptr = &input_buffer[1];

@@ -144,6 +144,7 @@ int kvm_arch_hardware_enable(void *garbage)
VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
if (status != 0) {
spin_unlock(&vp_lock);
printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
return -EINVAL;
}

@@ -34,6 +34,8 @@
/* MS be sure that SLAB allocates aligned objects */
#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES

#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES

#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))

@@ -90,7 +90,6 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
/* FIXME this part of code is untested */
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
sg->dma_length = sg->length;
__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
sg->length, direction);
}

@@ -1277,6 +1277,7 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
printk(KERN_WARNING "PCI: Cannot allocate resource region "
"%d of PCI bridge %d, will remap\n", i, bus->number);
clear_resource:
res->start = res->end = 0;
res->flags = 0;
}

@@ -117,6 +117,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
* Invalidate the resource to prevent
* child resource allocations in this
* range. */
r->start = r->end = 0;
r->flags = 0;
}
}

@@ -1309,6 +1309,7 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
printk(KERN_WARNING "PCI: Cannot allocate resource region "
"%d of PCI bridge %d, will remap\n", i, bus->number);
clear_resource:
res->start = res->end = 0;
res->flags = 0;
}

@@ -164,7 +164,7 @@ static int __init kvmppc_e500_init(void)
return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
}

static void __init kvmppc_e500_exit(void)
static void __exit kvmppc_e500_exit(void)
{
kvmppc_booke_exit();
}

@@ -1077,7 +1077,7 @@ static int calculate_lfsr(int n)
index = ENTRIES-1;

/* make sure index is valid */
if ((index > ENTRIES) || (index < 0))
if ((index >= ENTRIES) || (index < 0))
index = ENTRIES-1;

return initial_lfsr[index];

@@ -181,7 +181,7 @@ static int __init appldata_os_init(void)
goto out;
}

appldata_os_data = kzalloc(max_size, GFP_DMA);
appldata_os_data = kzalloc(max_size, GFP_KERNEL | GFP_DMA);
if (appldata_os_data == NULL) {
rc = -ENOMEM;
goto out;

@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.34-rc3
# Fri Apr 9 09:57:10 2010
# Linux kernel version: 2.6.35-rc1
# Fri Jun 4 11:32:40 2010
#
CONFIG_SCHED_MC=y
CONFIG_MMU=y

@@ -35,11 +35,13 @@ CONFIG_CONSTRUCTORS=y
CONFIG_EXPERIMENTAL=y
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_CROSS_COMPILE=""
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set

@@ -77,6 +79,7 @@ CONFIG_CGROUP_NS=y
# CONFIG_CGROUP_CPUACCT is not set
# CONFIG_RESOURCE_COUNTERS is not set
# CONFIG_CGROUP_SCHED is not set
# CONFIG_BLK_CGROUP is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set

@@ -157,7 +160,6 @@ CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
CONFIG_BLK_DEV_BSG=y
# CONFIG_BLK_DEV_INTEGRITY is not set
# CONFIG_BLK_CGROUP is not set
CONFIG_BLOCK_COMPAT=y

#

@@ -166,7 +168,6 @@ CONFIG_BLOCK_COMPAT=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
# CONFIG_CFQ_GROUP_IOSCHED is not set
CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set

@@ -247,7 +248,6 @@ CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_NR_CPUS=32
CONFIG_HOTPLUG_CPU=y
# CONFIG_SCHED_BOOK is not set
CONFIG_COMPAT=y
CONFIG_SYSVIPC_COMPAT=y
CONFIG_AUDIT_ARCH=y

@@ -320,7 +320,6 @@ CONFIG_COMPAT_BINFMT_ELF=y
# CONFIG_HAVE_AOUT is not set
CONFIG_BINFMT_MISC=m
CONFIG_FORCE_MAX_ZONEORDER=9
# CONFIG_PROCESS_DEBUG is not set
CONFIG_PFAULT=y
# CONFIG_SHARED_KERNEL is not set
# CONFIG_CMM is not set

@@ -457,6 +456,7 @@ CONFIG_NF_CONNTRACK=m
# CONFIG_IP6_NF_IPTABLES is not set
# CONFIG_IP_DCCP is not set
CONFIG_IP_SCTP=m
# CONFIG_NET_SCTPPROBE is not set
# CONFIG_SCTP_DBG_MSG is not set
# CONFIG_SCTP_DBG_OBJCNT is not set
# CONFIG_SCTP_HMAC_NONE is not set

@@ -465,6 +465,7 @@ CONFIG_SCTP_HMAC_MD5=y
# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_L2TP is not set
# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set

@@ -525,6 +526,7 @@ CONFIG_NET_ACT_NAT=m
# CONFIG_NET_CLS_IND is not set
CONFIG_NET_SCH_FIFO=y
# CONFIG_DCB is not set
CONFIG_RPS=y

#
# Network testing

@@ -546,6 +548,7 @@ CONFIG_CAN_VCAN=m
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
# CONFIG_CAIF is not set
# CONFIG_PCMCIA is not set
CONFIG_CCW=y

@@ -728,6 +731,7 @@ CONFIG_VIRTIO_NET=m
# Character devices
#
CONFIG_DEVKMEM=y
# CONFIG_N_GSM is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y

@@ -775,6 +779,7 @@ CONFIG_S390_TAPE_34XX=m
# CONFIG_MONREADER is not set
CONFIG_MONWRITER=m
CONFIG_S390_VMUR=m
# CONFIG_RAMOOPS is not set

#
# PPS support

@@ -788,10 +793,6 @@ CONFIG_S390_VMUR=m
# CONFIG_NEW_LEDS is not set
CONFIG_ACCESSIBILITY=y
# CONFIG_AUXDISPLAY is not set

#
# TI VLYNQ
#
# CONFIG_STAGING is not set

#

@@ -976,6 +977,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
# CONFIG_LKDTM is not set
# CONFIG_CPU_NOTIFIER_ERROR_INJECT is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y

@@ -1010,6 +1012,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
CONFIG_KPROBE_EVENT=y
# CONFIG_RING_BUFFER_BENCHMARK is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_ATOMIC64_SELFTEST is not set
CONFIG_SAMPLES=y
# CONFIG_SAMPLE_TRACEPOINTS is not set
# CONFIG_SAMPLE_TRACE_EVENTS is not set

@@ -55,8 +55,10 @@ void *module_alloc(unsigned long size)
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
vfree(mod->arch.syminfo);
mod->arch.syminfo = NULL;
if (mod) {
vfree(mod->arch.syminfo);
mod->arch.syminfo = NULL;
}
vfree(module_region);
}

@@ -761,7 +761,7 @@ static int __init kvm_s390_init(void)
* to hold the maximum amount of facilites. On the other hand, we
* only set facilities that are known to work in KVM.
*/
facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
if (!facilities) {
kvm_exit();
return -ENOMEM;

@@ -113,7 +113,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
struct kvm_s390_interrupt_info *inti;

inti = kzalloc(sizeof(*inti), GFP_KERNEL);
inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
if (!inti)
return -ENOMEM;
inti->type = KVM_S390_SIGP_STOP;

@@ -105,7 +105,7 @@ static int
dcss_set_subcodes(void)
{
#ifdef CONFIG_64BIT
char *name = kmalloc(8 * sizeof(char), GFP_DMA);
char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
unsigned long rx, ry;
int rc;

@@ -252,12 +252,13 @@ dcss_diag_translate_rc (int vm_rc) {
static int
query_segment_type (struct dcss_segment *seg)
{
struct qin64 *qin = kmalloc (sizeof(struct qin64), GFP_DMA);
struct qout64 *qout = kmalloc (sizeof(struct qout64), GFP_DMA);

int diag_cc, rc, i;
unsigned long dummy, vmrc;
int diag_cc, rc, i;
struct qout64 *qout;
struct qin64 *qin;

qin = kmalloc(sizeof(*qin), GFP_KERNEL | GFP_DMA);
qout = kmalloc(sizeof(*qout), GFP_KERNEL | GFP_DMA);
if ((qin == NULL) || (qout == NULL)) {
rc = -ENOMEM;
goto out_free;

@@ -286,7 +287,7 @@ query_segment_type (struct dcss_segment *seg)
copy data for the new format. */
if (segext_scode == DCSS_SEGEXT) {
struct qout64_old *qout_old;
qout_old = kzalloc(sizeof(struct qout64_old), GFP_DMA);
qout_old = kzalloc(sizeof(*qout_old), GFP_KERNEL | GFP_DMA);
if (qout_old == NULL) {
rc = -ENOMEM;
goto out_free;

@@ -407,11 +408,11 @@ segment_overlaps_others (struct dcss_segment *seg)
static int
__segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end)
{
struct dcss_segment *seg = kmalloc(sizeof(struct dcss_segment),
GFP_DMA);
int rc, diag_cc;
unsigned long start_addr, end_addr, dummy;
struct dcss_segment *seg;
int rc, diag_cc;

seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
if (seg == NULL) {
rc = -ENOMEM;
goto out;

@@ -110,6 +110,7 @@
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032

@@ -15,6 +15,8 @@ static inline int arch_prepare_suspend(void) { return 0; }
struct saved_context {
u16 es, fs, gs, ss;
unsigned long cr0, cr2, cr3, cr4;
u64 misc_enable;
bool misc_enable_saved;
struct desc_ptr gdt;
struct desc_ptr idt;
u16 ldt;

@@ -27,6 +27,8 @@ struct saved_context {
u16 ds, es, fs, gs, ss;
unsigned long gs_base, gs_kernel_base, fs_base;
unsigned long cr0, cr2, cr3, cr4, cr8;
u64 misc_enable;
bool misc_enable_saved;
unsigned long efer;
u16 gdt_pad;
u16 gdt_limit;

@@ -1815,6 +1815,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,

spte |= PT_WRITABLE_MASK;

if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
spte &= ~PT_USER_MASK;

/*
* Optimization: for pte sync, if spte was writable the hash
* lookup is unnecessary (and expensive). Write protection

@@ -1870,6 +1873,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,

child = page_header(pte & PT64_BASE_ADDR_MASK);
mmu_page_remove_parent_pte(child, sptep);
__set_spte(sptep, shadow_trap_nonpresent_pte);
kvm_flush_remote_tlbs(vcpu->kvm);
} else if (pfn != spte_to_pfn(*sptep)) {
pgprintk("hfn old %lx new %lx\n",
spte_to_pfn(*sptep), pfn);

@@ -28,6 +28,7 @@
#include <linux/ftrace_event.h>
#include <linux/slab.h>

#include <asm/tlbflush.h>
#include <asm/desc.h>

#include <asm/virtext.h>

@@ -56,6 +57,8 @@ MODULE_LICENSE("GPL")

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,

@@ -374,6 +377,31 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
u32 low, high;
int err;
u64 val;

/* Only Fam10h is affected */
if (boot_cpu_data.x86 != 0x10)
return;

/* Use _safe variants to not break nested virtualization */
val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
if (err)
return;

val |= (1ULL << 47);

low = lower_32_bits(val);
high = upper_32_bits(val);

native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

erratum_383_found = true;
}

static int has_svm(void)
{
const char *msg;

@@ -429,6 +457,8 @@ static int svm_hardware_enable(void *garbage)

wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

svm_init_erratum_383();

return 0;
}

@@ -1410,8 +1440,59 @@ static int nm_interception(struct vcpu_svm *svm)
return 1;
}

static int mc_interception(struct vcpu_svm *svm)
static bool is_erratum_383(void)
{
int err, i;
u64 value;

if (!erratum_383_found)
return false;

value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
if (err)
return false;

/* Bit 62 may or may not be set for this mce */
value &= ~(1ULL << 62);

if (value != 0xb600000000010015ULL)
return false;

/* Clear MCi_STATUS registers */
for (i = 0; i < 6; ++i)
native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
if (!err) {
u32 low, high;

value &= ~(1ULL << 2);
low = lower_32_bits(value);
high = upper_32_bits(value);

native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
}

/* Flush tlb to evict multi-match entries */
__flush_tlb_all();

return true;
}

static void svm_handle_mce(struct vcpu_svm *svm)
{
if (is_erratum_383()) {
/*
* Erratum 383 triggered. Guest state is corrupt so kill the
* guest.
*/
pr_err("KVM: Guest triggered AMD Erratum 383\n");

set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests);

return;
}

/*
* On an #MC intercept the MCE handler is not called automatically in
* the host. So do it by hand here.

@@ -1420,6 +1501,11 @@ static int mc_interception(struct vcpu_svm *svm)
"int $0x12\n");
/* not sure if we ever come back to this point */

return;
}

static int mc_interception(struct vcpu_svm *svm)
{
return 1;
}

@@ -3088,6 +3174,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
}

/*
* We need to handle MC intercepts here before the vcpu has a chance to
* change the physical cpu
*/
if (unlikely(svm->vmcb->control.exit_code ==
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm);
}

#undef R

@@ -96,6 +96,7 @@ EXPORT_SYMBOL(pcibios_align_resource);
* the fact the PCI specs explicitly allow address decoders to be
* shared between expansion ROMs and other resource regions, it's
* at least dangerous)
* - bad resource sizes or overlaps with other regions
*
* Our solution:
* (1) Allocate resources for all buses behind PCI-to-PCI bridges.

@@ -136,6 +137,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
* child resource allocations in this
* range.
*/
r->start = r->end = 0;
r->flags = 0;
}
}

@@ -105,6 +105,8 @@ static void __save_processor_state(struct saved_context *ctxt)
ctxt->cr4 = read_cr4();
ctxt->cr8 = read_cr8();
#endif
ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
&ctxt->misc_enable);
}

/* Needed by apm.c */

@@ -152,6 +154,8 @@ static void fix_processor_context(void)
*/
static void __restore_processor_state(struct saved_context *ctxt)
{
if (ctxt->misc_enable_saved)
wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
/*
* control registers
*/

@@ -541,29 +541,11 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
return -EINVAL;
}

static int ahci_is_device_present(void __iomem *port_mmio)
{
u8 status = readl(port_mmio + PORT_TFDATA) & 0xff;

/* Make sure PxTFD.STS.BSY and PxTFD.STS.DRQ are 0 */
if (status & (ATA_BUSY | ATA_DRQ))
return 0;

/* Make sure PxSSTS.DET is 3h */
status = readl(port_mmio + PORT_SCR_STAT) & 0xf;
if (status != 3)
return 0;
return 1;
}

void ahci_start_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;

if (!ahci_is_device_present(port_mmio))
return;

/* start DMA */
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_START;

@@ -1892,6 +1874,9 @@ static void ahci_error_handler(struct ata_port *ap)
}

sata_pmp_error_handler(ap);

if (!ata_dev_enabled(ap->link.device))
ahci_stop_engine(ap);
}

static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)

@@ -539,12 +539,12 @@ static void sil24_config_port(struct ata_port *ap)
writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);

/* zero error counters. */
writel(0x8000, port + PORT_DECODE_ERR_THRESH);
writel(0x8000, port + PORT_CRC_ERR_THRESH);
writel(0x8000, port + PORT_HSHK_ERR_THRESH);
writel(0x0000, port + PORT_DECODE_ERR_CNT);
writel(0x0000, port + PORT_CRC_ERR_CNT);
writel(0x0000, port + PORT_HSHK_ERR_CNT);
writew(0x8000, port + PORT_DECODE_ERR_THRESH);
writew(0x8000, port + PORT_CRC_ERR_THRESH);
writew(0x8000, port + PORT_HSHK_ERR_THRESH);
writew(0x0000, port + PORT_DECODE_ERR_CNT);
writew(0x0000, port + PORT_CRC_ERR_CNT);
writew(0x0000, port + PORT_HSHK_ERR_CNT);

/* always use 64bit activation */
writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);

@@ -622,6 +622,11 @@ static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);

/*
* The barrier is required to ensure that writes to cmd_block reach
* the memory before the write to PORT_CMD_ACTIVATE.
*/
wmb();
writel((u32)paddr, port + PORT_CMD_ACTIVATE);
writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);

@@ -865,7 +870,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
} else {
prb = &cb->atapi.prb;
sge = cb->atapi.sge;
memset(cb->atapi.cdb, 0, 32);
memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);

if (ata_is_data(qc->tf.protocol)) {

@@ -895,6 +900,11 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
activate = port + PORT_CMD_ACTIVATE + tag * 8;

/*
* The barrier is required to ensure that writes to cmd_block reach
* the memory before the write to PORT_CMD_ACTIVATE.
*/
wmb();
writel((u32)paddr, activate);
writel((u64)paddr >> 32, activate + 4);

@@ -578,10 +578,24 @@ static void svia_configure(struct pci_dev *pdev)

/*
* vt6421 has problems talking to some drives. The following
* is the magic fix from Joseph Chan <JosephChan@via.com.tw>.
* Please add proper documentation if possible.
* is the fix from Joseph Chan <JosephChan@via.com.tw>.
*
* When host issues HOLD, device may send up to 20DW of data
* before acknowledging it with HOLDA and the host should be
* able to buffer them in FIFO. Unfortunately, some WD drives
* send upto 40DW before acknowledging HOLD and, in the
* default configuration, this ends up overflowing vt6421's
* FIFO, making the controller abort the transaction with
* R_ERR.
*
* Rx52[2] is the internal 128DW FIFO Flow control watermark
* adjusting mechanism enable bit and the default value 0
* means host will issue HOLD to device when the left FIFO
* size goes below 32DW. Setting it to 1 makes the watermark
* 64DW.
*
* https://bugzilla.kernel.org/show_bug.cgi?id=15173
* http://article.gmane.org/gmane.linux.ide/46352
*/
if (pdev->device == 0x3249) {
pci_read_config_byte(pdev, 0x52, &tmp8);

@@ -304,7 +304,7 @@ static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
scr_memsetw(d + (b - t - nr) * vc->vc_size_row, vc->vc_video_erase_char,
scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char,
vc->vc_size_row * nr);
}

@@ -231,7 +231,7 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
static void fw_card_bm_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, work.work);
struct fw_device *root_device;
struct fw_device *root_device, *irm_device;
struct fw_node *root_node;
unsigned long flags;
int root_id, new_root_id, irm_id, local_id;

@@ -239,6 +239,7 @@ static void fw_card_bm_work(struct work_struct *work)
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
bool irm_is_1394_1995_only;

spin_lock_irqsave(&card->lock, flags);

@@ -248,12 +249,18 @@ static void fw_card_bm_work(struct work_struct *work)
}

generation = card->generation;

root_node = card->root_node;
fw_node_get(root_node);
root_device = root_node->data;
root_device_is_running = root_device &&
atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
root_device_is_cmc = root_device && root_device->cmc;

irm_device = card->irm_node->data;
irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
(irm_device->config_rom[2] & 0x000000f0) == 0;

root_id = root_node->node_id;
irm_id = card->irm_node->node_id;
local_id = card->local_node->node_id;

@@ -276,8 +283,15 @@ static void fw_card_bm_work(struct work_struct *work)

if (!card->irm_node->link_on) {
new_root_id = local_id;
fw_notify("IRM has link off, making local node (%02x) root.\n",
new_root_id);
fw_notify("%s, making local node (%02x) root.\n",
"IRM has link off", new_root_id);
goto pick_me;
}

if (irm_is_1394_1995_only) {
new_root_id = local_id;
fw_notify("%s, making local node (%02x) root.\n",
"IRM is not 1394a compliant", new_root_id);
goto pick_me;
}

@@ -316,8 +330,8 @@ static void fw_card_bm_work(struct work_struct *work)
* root, and thus, IRM.
*/
new_root_id = local_id;
fw_notify("BM lock failed, making local node (%02x) root.\n",
new_root_id);
fw_notify("%s, making local node (%02x) root.\n",
"BM lock failed", new_root_id);
goto pick_me;
}
} else if (card->bm_generation != generation) {

@@ -1840,8 +1840,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,

ret = copy_from_user(clips, clips_ptr,
num_clips * sizeof(*clips));
if (ret)
if (ret) {
ret = -EFAULT;
goto out_err2;
}
}

if (fb->funcs->dirty) {

@@ -264,7 +264,7 @@ bool drm_fb_helper_force_kernel_mode(void)
int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
void *panic_str)
{
DRM_ERROR("panic occurred, switching back to text console\n");
printk(KERN_ERR "panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
return 0;
}

@@ -1402,19 +1402,19 @@ static int i915_load_modeset_init(struct drm_device *dev,
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
if (ret)
goto destroy_ringbuffer;
goto cleanup_ringbuffer;

ret = vga_switcheroo_register_client(dev->pdev,
i915_switcheroo_set_state,
i915_switcheroo_can_switch);
if (ret)
goto destroy_ringbuffer;
goto cleanup_vga_client;

intel_modeset_init(dev);

ret = drm_irq_install(dev);
if (ret)
goto destroy_ringbuffer;
goto cleanup_vga_switcheroo;

/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */

@@ -1426,11 +1426,20 @@ static int i915_load_modeset_init(struct drm_device *dev,

I915_WRITE(INSTPM, (1 << 5) | (1 << 21));

intel_fbdev_init(dev);
ret = intel_fbdev_init(dev);
if (ret)
goto cleanup_irq;

drm_kms_helper_poll_init(dev);
return 0;

destroy_ringbuffer:
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_vga_switcheroo:
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
vga_client_register(dev->pdev, NULL, NULL, NULL);
cleanup_ringbuffer:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);

@@ -278,6 +278,7 @@ typedef struct drm_i915_private {
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;

/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */

@@ -3653,6 +3653,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
}

dspcntr |= DISPLAY_PLANE_ENABLE;
pipeconf |= PIPEACONF_ENABLE;
dpll |= DPLL_VCO_ENABLE;

/* Disable the panel fitter if it was on our pipe */
if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);

@@ -3973,6 +3978,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked;
}

ret = i915_gem_object_set_to_gtt_domain(bo, 0);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
goto fail_unpin;
}

addr = obj_priv->gtt_offset;
} else {
ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);

@@ -4016,6 +4028,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_bo = bo;

return 0;
fail_unpin:
i915_gem_object_unpin(bo);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:

@@ -5461,7 +5475,6 @@ static void intel_init_display(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int num_pipe;
int i;

drm_mode_config_init(dev);

@@ -5491,13 +5504,13 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);

if (IS_MOBILE(dev) || IS_I9XX(dev))
num_pipe = 2;
dev_priv->num_pipe = 2;
else
num_pipe = 1;
dev_priv->num_pipe = 1;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
num_pipe, num_pipe > 1 ? "s" : "");
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

for (i = 0; i < num_pipe; i++) {
for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
}
|
@ -245,6 +245,7 @@ int intel_fbdev_init(struct drm_device *dev)
|
|||
{
|
||||
struct intel_fbdev *ifbdev;
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
|
||||
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
|
||||
if (!ifbdev)
|
||||
|
@ -253,8 +254,13 @@ int intel_fbdev_init(struct drm_device *dev)
|
|||
dev_priv->fbdev = ifbdev;
|
||||
ifbdev->helper.funcs = &intel_fb_helper_funcs;
|
||||
|
||||
drm_fb_helper_init(dev, &ifbdev->helper, 2,
|
||||
INTELFB_CONN_LIMIT);
|
||||
ret = drm_fb_helper_init(dev, &ifbdev->helper,
|
||||
dev_priv->num_pipe,
|
||||
INTELFB_CONN_LIMIT);
|
||||
if (ret) {
|
||||
kfree(ifbdev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
|
||||
drm_fb_helper_initial_config(&ifbdev->helper, 32);
|
||||
|
|
|

@@ -834,7 +834,7 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index)
if (i2c_index == 0x81)
i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;

if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) {
if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
return NULL;
}

@@ -3920,7 +3920,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b

static uint8_t *
bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
uint16_t record, int record_len, int record_nr)
uint16_t record, int record_len, int record_nr,
bool match_link)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;

@@ -3928,12 +3929,28 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
uint16_t table;
int i, v;

switch (dcbent->type) {
case OUTPUT_TMDS:
case OUTPUT_LVDS:
case OUTPUT_DP:
break;
default:
match_link = false;
break;
}

for (i = 0; i < record_nr; i++, record += record_len) {
table = ROM16(bios->data[record]);
if (!table)
continue;
entry = ROM32(bios->data[table]);

if (match_link) {
v = (entry & 0x00c00000) >> 22;
if (!(v & dcbent->sorconf.link))
continue;
}

v = (entry & 0x000f0000) >> 16;
if (!(v & dcbent->or))
continue;

@@ -3975,7 +3992,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
*length = table[4];
return bios_output_config_match(dev, dcbent,
bios->display.dp_table_ptr + table[1],
table[2], table[3]);
table[2], table[3], table[0] >= 0x21);
}

int

@@ -4064,7 +4081,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
dcbent->type, dcbent->location, dcbent->or);
otable = bios_output_config_match(dev, dcbent, table[1] +
bios->display.script_table_ptr,
table[2], table[3]);
table[2], table[3], table[0] >= 0x21);
if (!otable) {
NV_ERROR(dev, "Couldn't find matching output script table\n");
return 1;

@@ -377,6 +377,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fbdev *nfbdev;
int ret;

nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
if (!nfbdev)

@@ -386,7 +387,12 @@ int nouveau_fbcon_init(struct drm_device *dev)
dev_priv->nfbdev = nfbdev;
nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;

drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
if (ret) {
kfree(nfbdev);
return ret;
}

drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
drm_fb_helper_initial_config(&nfbdev->helper, 32);
return 0;
@ -779,29 +779,24 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/* map larger RAMIN aperture on NV40 cards */
|
||||
dev_priv->ramin = NULL;
|
||||
/* Map PRAMIN BAR, or on older cards, the aperture withing BAR0 */
|
||||
if (dev_priv->card_type >= NV_40) {
|
||||
int ramin_bar = 2;
|
||||
if (pci_resource_len(dev->pdev, ramin_bar) == 0)
|
||||
ramin_bar = 3;
|
||||
|
||||
dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
|
||||
dev_priv->ramin = ioremap(
|
||||
pci_resource_start(dev->pdev, ramin_bar),
|
||||
dev_priv->ramin =
|
||||
ioremap(pci_resource_start(dev->pdev, ramin_bar),
|
||||
dev_priv->ramin_size);
|
||||
if (!dev_priv->ramin) {
|
||||
NV_ERROR(dev, "Failed to init RAMIN mapping, "
|
||||
"limited instance memory available\n");
|
||||
NV_ERROR(dev, "Failed to PRAMIN BAR");
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
/* On older cards (or if the above failed), create a map covering
|
||||
* the BAR0 PRAMIN aperture */
|
||||
if (!dev_priv->ramin) {
|
||||
} else {
|
||||
dev_priv->ramin_size = 1 * 1024 * 1024;
|
||||
dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
|
||||
dev_priv->ramin_size);
|
||||
dev_priv->ramin_size);
|
||||
if (!dev_priv->ramin) {
|
||||
NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -6,10 +6,16 @@
|
|||
int
|
||||
nv50_fb_init(struct drm_device *dev)
|
||||
{
|
||||
/* This is needed to get meaningful information from 100c90
|
||||
* on traps. No idea what these values mean exactly. */
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
|
||||
/* Not a clue what this is exactly. Without pointing it at a
|
||||
* scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
|
||||
* cause IOMMU "read from address 0" errors (rh#561267)
|
||||
*/
|
||||
nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8);
|
||||
|
||||
/* This is needed to get meaningful information from 100c90
|
||||
* on traps. No idea what these values mean exactly. */
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x50:
|
||||
nv_wr32(dev, 0x100c90, 0x0707ff);
|
||||
|
|
|
@ -31,7 +31,7 @@ nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
|
|||
{
|
||||
const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
|
||||
|
||||
if (gpio->line > 32)
|
||||
if (gpio->line >= 32)
|
||||
return -EINVAL;
|
||||
|
||||
*reg = nv50_gpio_reg[gpio->line >> 3];
|
||||
|
|
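Both index checks tightened in this commit (i2c_index against DCB_MAX_NUM_I2C_ENTRIES above, and gpio->line here) replace ">" with ">=", since an index equal to the table size is already one past the end. A generic sketch of the corrected bound, with hypothetical names (MAX_ENTRIES, lookup_entry):

#include <linux/errno.h>
#include <linux/types.h>

#define MAX_ENTRIES 32			/* hypothetical table size */

static int lookup_entry(const u32 *table, unsigned int idx, u32 *out)
{
	if (idx >= MAX_ENTRIES)		/* idx == MAX_ENTRIES would read past the end */
		return -EINVAL;
	*out = table[idx];
	return 0;
}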
|
@ -41,12 +41,18 @@ void evergreen_fini(struct radeon_device *rdev);
|
|||
|
||||
void evergreen_pm_misc(struct radeon_device *rdev)
|
||||
{
|
||||
int requested_index = rdev->pm.requested_power_state_index;
|
||||
struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
|
||||
struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
|
||||
int req_ps_idx = rdev->pm.requested_power_state_index;
|
||||
int req_cm_idx = rdev->pm.requested_clock_mode_index;
|
||||
struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
|
||||
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
|
||||
|
||||
if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
|
||||
radeon_atom_set_voltage(rdev, voltage->voltage);
|
||||
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
|
||||
if (voltage->voltage != rdev->pm.current_vddc) {
|
||||
radeon_atom_set_voltage(rdev, voltage->voltage);
|
||||
rdev->pm.current_vddc = voltage->voltage;
|
||||
DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void evergreen_pm_prepare(struct radeon_device *rdev)
|
||||
|
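The same change to evergreen_pm_misc() is repeated below for r600_pm_misc() and rv770_pm_misc(): index clock_info[] by the requested clock mode instead of a hard-coded 0, and only touch the regulator when the voltage actually changes. Collapsed into one hedged fragment (it assumes only the radeon_pm fields that appear in the hunks):

	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	/* program VDDC only when it differs from what was last set */
	if ((voltage->type == VOLTAGE_SW) && voltage->voltage &&
	    voltage->voltage != rdev->pm.current_vddc) {
		radeon_atom_set_voltage(rdev, voltage->voltage);
		rdev->pm.current_vddc = voltage->voltage;
	}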
@ -2153,7 +2159,7 @@ int evergreen_init(struct radeon_device *rdev)
|
|||
if (r)
|
||||
return r;
|
||||
|
||||
rdev->accel_working = false;
|
||||
rdev->accel_working = true;
|
||||
r = evergreen_startup(rdev);
|
||||
if (r) {
|
||||
dev_err(rdev->dev, "disabling GPU acceleration\n");
|
||||
|
|
|
@ -162,6 +162,11 @@ void r100_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid sh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high sh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
|
@ -172,6 +177,11 @@ void r100_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid mh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high mh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
|
|
|
@ -45,9 +45,14 @@ void r420_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
|
||||
/* low sh */
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid sh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high sh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
|
@ -58,6 +63,11 @@ void r420_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid mh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high mh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
|
|
|
@ -291,6 +291,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid sh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high sh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
|
||||
|
@ -301,6 +306,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid mh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high mh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
|
||||
|
@ -317,6 +327,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid sh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high sh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
|
||||
|
@ -327,6 +342,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid mh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high mh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
|
||||
|
@ -343,6 +363,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid sh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high sh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
|
||||
|
@ -353,6 +378,11 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid mh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high mh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
|
||||
|
@ -375,6 +405,11 @@ void r600_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid sh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high sh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
|
@ -385,6 +420,11 @@ void r600_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid mh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* high mh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
|
||||
|
@ -401,7 +441,12 @@ void r600_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid sh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
|
||||
/* high sh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
|
||||
|
@ -411,7 +456,12 @@ void r600_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
|
||||
/* mid mh */
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
|
||||
/* high mh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
|
||||
|
@ -430,14 +480,30 @@ void r600_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
|
||||
} else {
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
|
||||
}
|
||||
/* mid sh */
|
||||
if (rdev->flags & RADEON_IS_MOBILITY) {
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
|
||||
} else {
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
|
||||
}
|
||||
/* high sh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
|
||||
|
@ -453,14 +519,30 @@ void r600_pm_init_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
|
||||
} else {
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
|
||||
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
|
||||
}
|
||||
/* mid mh */
|
||||
if (rdev->flags & RADEON_IS_MOBILITY) {
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
|
||||
} else {
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
|
||||
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
|
||||
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
|
||||
}
|
||||
/* high mh */
|
||||
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
|
||||
|
@ -475,13 +557,18 @@ void r600_pm_init_profile(struct radeon_device *rdev)
|
|||
|
||||
void r600_pm_misc(struct radeon_device *rdev)
|
||||
{
|
||||
int requested_index = rdev->pm.requested_power_state_index;
|
||||
struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
|
||||
struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
|
||||
|
||||
if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
|
||||
radeon_atom_set_voltage(rdev, voltage->voltage);
|
||||
int req_ps_idx = rdev->pm.requested_power_state_index;
|
||||
int req_cm_idx = rdev->pm.requested_clock_mode_index;
|
||||
struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
|
||||
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
|
||||
|
||||
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
|
||||
if (voltage->voltage != rdev->pm.current_vddc) {
|
||||
radeon_atom_set_voltage(rdev, voltage->voltage);
|
||||
rdev->pm.current_vddc = voltage->voltage;
|
||||
DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool r600_gui_idle(struct radeon_device *rdev)
|
||||
|
|
|
@ -648,15 +648,18 @@ enum radeon_pm_profile_type {
|
|||
PM_PROFILE_DEFAULT,
|
||||
PM_PROFILE_AUTO,
|
||||
PM_PROFILE_LOW,
|
||||
PM_PROFILE_MID,
|
||||
PM_PROFILE_HIGH,
|
||||
};
|
||||
|
||||
#define PM_PROFILE_DEFAULT_IDX 0
|
||||
#define PM_PROFILE_LOW_SH_IDX 1
|
||||
#define PM_PROFILE_HIGH_SH_IDX 2
|
||||
#define PM_PROFILE_LOW_MH_IDX 3
|
||||
#define PM_PROFILE_HIGH_MH_IDX 4
|
||||
#define PM_PROFILE_MAX 5
|
||||
#define PM_PROFILE_MID_SH_IDX 2
|
||||
#define PM_PROFILE_HIGH_SH_IDX 3
|
||||
#define PM_PROFILE_LOW_MH_IDX 4
|
||||
#define PM_PROFILE_MID_MH_IDX 5
|
||||
#define PM_PROFILE_HIGH_MH_IDX 6
|
||||
#define PM_PROFILE_MAX 7
|
||||
|
||||
struct radeon_pm_profile {
|
||||
int dpms_off_ps_idx;
|
||||
|
@ -745,6 +748,7 @@ struct radeon_pm {
|
|||
int default_power_state_index;
|
||||
u32 current_sclk;
|
||||
u32 current_mclk;
|
||||
u32 current_vddc;
|
||||
struct radeon_i2c_chan *i2c_bus;
|
||||
/* selected pm method */
|
||||
enum radeon_pm_method pm_method;
|
||||
|
|
|
@ -1833,10 +1833,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
|
|||
/* skip invalid modes */
|
||||
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
|
||||
continue;
|
||||
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
|
||||
VOLTAGE_SW;
|
||||
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
|
||||
clock_info->usVDDC;
|
||||
/* voltage works differently on IGPs */
|
||||
mode_index++;
|
||||
} else if (ASIC_IS_DCE4(rdev)) {
|
||||
struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
|
||||
|
@ -1969,6 +1966,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
|
|||
|
||||
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
|
||||
rdev->pm.current_clock_mode_index = 0;
|
||||
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
|
||||
}
|
||||
|
||||
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
|
||||
|
|
|
@ -2026,6 +2026,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
|
|||
combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
|
||||
break;
|
||||
default:
|
||||
ddc_i2c.valid = false;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -2339,6 +2340,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
|
|||
if (RBIOS8(tv_info + 6) == 'T') {
|
||||
if (radeon_apply_legacy_tv_quirks(dev)) {
|
||||
hpd.hpd = RADEON_HPD_NONE;
|
||||
ddc_i2c.valid = false;
|
||||
radeon_add_legacy_encoder(dev,
|
||||
radeon_get_encoder_id
|
||||
(dev,
|
||||
|
@ -2455,7 +2457,7 @@ default_mode:
|
|||
rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
|
||||
rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
|
||||
if ((state_index > 0) &&
|
||||
(rdev->pm.power_state[0].clock_info[0].voltage.type = VOLTAGE_GPIO))
|
||||
(rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO))
|
||||
rdev->pm.power_state[state_index].clock_info[0].voltage =
|
||||
rdev->pm.power_state[0].clock_info[0].voltage;
|
||||
else
|
||||
|
|
|
@ -284,8 +284,7 @@ static const char *connector_names[15] = {
|
|||
"eDP",
|
||||
};
|
||||
|
||||
static const char *hpd_names[7] = {
|
||||
"NONE",
|
||||
static const char *hpd_names[6] = {
|
||||
"HPD1",
|
||||
"HPD2",
|
||||
"HPD3",
|
||||
|
|
|
@ -45,9 +45,10 @@
|
|||
* - 2.2.0 - add r6xx/r7xx const buffer support
|
||||
* - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
|
||||
* - 2.4.0 - add crtc id query
|
||||
* - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
|
||||
*/
|
||||
#define KMS_DRIVER_MAJOR 2
|
||||
#define KMS_DRIVER_MINOR 4
|
||||
#define KMS_DRIVER_MINOR 5
|
||||
#define KMS_DRIVER_PATCHLEVEL 0
|
||||
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
|
||||
int radeon_driver_unload_kms(struct drm_device *dev);
|
||||
|
|
|
@ -363,6 +363,7 @@ int radeon_fbdev_init(struct radeon_device *rdev)
|
|||
{
|
||||
struct radeon_fbdev *rfbdev;
|
||||
int bpp_sel = 32;
|
||||
int ret;
|
||||
|
||||
/* select 8 bpp console on RN50 or 16MB cards */
|
||||
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
|
||||
|
@ -376,9 +377,14 @@ int radeon_fbdev_init(struct radeon_device *rdev)
|
|||
rdev->mode_info.rfbdev = rfbdev;
|
||||
rfbdev->helper.funcs = &radeon_fb_helper_funcs;
|
||||
|
||||
drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
|
||||
rdev->num_crtc,
|
||||
RADEONFB_CONN_LIMIT);
|
||||
ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
|
||||
rdev->num_crtc,
|
||||
RADEONFB_CONN_LIMIT);
|
||||
if (ret) {
|
||||
kfree(rfbdev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
|
||||
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
|
||||
return 0;
|
||||
|
|
|
@ -118,7 +118,11 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|||
value = rdev->num_z_pipes;
|
||||
break;
|
||||
case RADEON_INFO_ACCEL_WORKING:
|
||||
value = rdev->accel_working;
|
||||
/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
|
||||
if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
|
||||
value = false;
|
||||
else
|
||||
value = rdev->accel_working;
|
||||
break;
|
||||
case RADEON_INFO_CRTC_FROM_ID:
|
||||
for (i = 0, found = 0; i < rdev->num_crtc; i++) {
|
||||
|
@ -134,6 +138,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
|||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case RADEON_INFO_ACCEL_WORKING2:
|
||||
value = rdev->accel_working;
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG("Invalid request %d\n", info->request);
|
||||
return -EINVAL;
|
||||
|
|
|
@ -1168,6 +1168,17 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
|
|||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
|
||||
bool color = true;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
/* find out if crtc2 is in use or if this encoder is using it */
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
if ((radeon_crtc->crtc_id == 1) && crtc->enabled) {
|
||||
if (encoder->crtc != crtc) {
|
||||
return connector_status_disconnected;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_Composite ||
|
||||
|
|
|
@ -33,6 +33,14 @@
|
|||
#define RADEON_WAIT_VBLANK_TIMEOUT 200
|
||||
#define RADEON_WAIT_IDLE_TIMEOUT 200
|
||||
|
||||
static const char *radeon_pm_state_type_name[5] = {
|
||||
"Default",
|
||||
"Powersave",
|
||||
"Battery",
|
||||
"Balanced",
|
||||
"Performance",
|
||||
};
|
||||
|
||||
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
|
||||
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
|
||||
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
|
||||
|
@ -84,9 +92,9 @@ static void radeon_pm_update_profile(struct radeon_device *rdev)
|
|||
rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
|
||||
} else {
|
||||
if (rdev->pm.active_crtc_count > 1)
|
||||
rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
|
||||
rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
|
||||
else
|
||||
rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
|
||||
rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
|
||||
}
|
||||
break;
|
||||
case PM_PROFILE_LOW:
|
||||
|
@ -95,6 +103,12 @@ static void radeon_pm_update_profile(struct radeon_device *rdev)
|
|||
else
|
||||
rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
|
||||
break;
|
||||
case PM_PROFILE_MID:
|
||||
if (rdev->pm.active_crtc_count > 1)
|
||||
rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
|
||||
else
|
||||
rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
|
||||
break;
|
||||
case PM_PROFILE_HIGH:
|
||||
if (rdev->pm.active_crtc_count > 1)
|
||||
rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
|
||||
|
@ -127,15 +141,6 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev)
|
|||
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
|
||||
ttm_bo_unmap_virtual(&bo->tbo);
|
||||
}
|
||||
|
||||
if (rdev->gart.table.vram.robj)
|
||||
ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);
|
||||
|
||||
if (rdev->stollen_vga_memory)
|
||||
ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);
|
||||
|
||||
if (rdev->r600_blit.shader_obj)
|
||||
ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
|
||||
}
|
||||
|
||||
static void radeon_sync_with_vblank(struct radeon_device *rdev)
|
||||
|
@ -281,6 +286,42 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
|
|||
mutex_unlock(&rdev->ddev->struct_mutex);
|
||||
}
|
||||
|
||||
static void radeon_pm_print_states(struct radeon_device *rdev)
|
||||
{
|
||||
int i, j;
|
||||
struct radeon_power_state *power_state;
|
||||
struct radeon_pm_clock_info *clock_info;
|
||||
|
||||
DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states);
|
||||
for (i = 0; i < rdev->pm.num_power_states; i++) {
|
||||
power_state = &rdev->pm.power_state[i];
|
||||
DRM_DEBUG("State %d: %s\n", i,
|
||||
radeon_pm_state_type_name[power_state->type]);
|
||||
if (i == rdev->pm.default_power_state_index)
|
||||
DRM_DEBUG("\tDefault");
|
||||
if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
|
||||
DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes);
|
||||
if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
|
||||
DRM_DEBUG("\tSingle display only\n");
|
||||
DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
|
||||
for (j = 0; j < power_state->num_clock_modes; j++) {
|
||||
clock_info = &(power_state->clock_info[j]);
|
||||
if (rdev->flags & RADEON_IS_IGP)
|
||||
DRM_DEBUG("\t\t%d e: %d%s\n",
|
||||
j,
|
||||
clock_info->sclk * 10,
|
||||
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
|
||||
else
|
||||
DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n",
|
||||
j,
|
||||
clock_info->sclk * 10,
|
||||
clock_info->mclk * 10,
|
||||
clock_info->voltage.voltage,
|
||||
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static ssize_t radeon_get_pm_profile(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
|
@ -311,6 +352,8 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
|
|||
rdev->pm.profile = PM_PROFILE_AUTO;
|
||||
else if (strncmp("low", buf, strlen("low")) == 0)
|
||||
rdev->pm.profile = PM_PROFILE_LOW;
|
||||
else if (strncmp("mid", buf, strlen("mid")) == 0)
|
||||
rdev->pm.profile = PM_PROFILE_MID;
|
||||
else if (strncmp("high", buf, strlen("high")) == 0)
|
||||
rdev->pm.profile = PM_PROFILE_HIGH;
|
||||
else {
|
||||
|
@ -377,15 +420,19 @@ void radeon_pm_suspend(struct radeon_device *rdev)
|
|||
{
|
||||
mutex_lock(&rdev->pm.mutex);
|
||||
cancel_delayed_work(&rdev->pm.dynpm_idle_work);
|
||||
rdev->pm.current_power_state_index = -1;
|
||||
rdev->pm.current_clock_mode_index = -1;
|
||||
rdev->pm.current_sclk = 0;
|
||||
rdev->pm.current_mclk = 0;
|
||||
mutex_unlock(&rdev->pm.mutex);
|
||||
}
|
||||
|
||||
void radeon_pm_resume(struct radeon_device *rdev)
|
||||
{
|
||||
/* asic init will reset the default power state */
|
||||
mutex_lock(&rdev->pm.mutex);
|
||||
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
|
||||
rdev->pm.current_clock_mode_index = 0;
|
||||
rdev->pm.current_sclk = rdev->clock.default_sclk;
|
||||
rdev->pm.current_mclk = rdev->clock.default_mclk;
|
||||
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
|
||||
mutex_unlock(&rdev->pm.mutex);
|
||||
radeon_pm_compute_clocks(rdev);
|
||||
}
|
||||
|
||||
|
@ -394,32 +441,24 @@ int radeon_pm_init(struct radeon_device *rdev)
|
|||
int ret;
|
||||
/* default to profile method */
|
||||
rdev->pm.pm_method = PM_METHOD_PROFILE;
|
||||
rdev->pm.profile = PM_PROFILE_DEFAULT;
|
||||
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
|
||||
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
|
||||
rdev->pm.dynpm_can_upclock = true;
|
||||
rdev->pm.dynpm_can_downclock = true;
|
||||
rdev->pm.current_sclk = 0;
|
||||
rdev->pm.current_mclk = 0;
|
||||
rdev->pm.current_sclk = rdev->clock.default_sclk;
|
||||
rdev->pm.current_mclk = rdev->clock.default_mclk;
|
||||
|
||||
if (rdev->bios) {
|
||||
if (rdev->is_atom_bios)
|
||||
radeon_atombios_get_power_modes(rdev);
|
||||
else
|
||||
radeon_combios_get_power_modes(rdev);
|
||||
radeon_pm_print_states(rdev);
|
||||
radeon_pm_init_profile(rdev);
|
||||
rdev->pm.current_power_state_index = -1;
|
||||
rdev->pm.current_clock_mode_index = -1;
|
||||
}
|
||||
|
||||
if (rdev->pm.num_power_states > 1) {
|
||||
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
|
||||
mutex_lock(&rdev->pm.mutex);
|
||||
rdev->pm.profile = PM_PROFILE_DEFAULT;
|
||||
radeon_pm_update_profile(rdev);
|
||||
radeon_pm_set_clocks(rdev);
|
||||
mutex_unlock(&rdev->pm.mutex);
|
||||
}
|
||||
|
||||
/* where's the best place to put these? */
|
||||
ret = device_create_file(rdev->dev, &dev_attr_power_profile);
|
||||
if (ret)
|
||||
|
@ -705,6 +744,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
|
|||
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
|
||||
if (rdev->asic->get_memory_clock)
|
||||
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
|
||||
if (rdev->pm.current_vddc)
|
||||
seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
|
||||
if (rdev->asic->get_pcie_lanes)
|
||||
seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
|
||||
|
||||
|
|
|
@ -44,12 +44,18 @@ void rv770_fini(struct radeon_device *rdev);
|
|||
|
||||
void rv770_pm_misc(struct radeon_device *rdev)
|
||||
{
|
||||
int requested_index = rdev->pm.requested_power_state_index;
|
||||
struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
|
||||
struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
|
||||
int req_ps_idx = rdev->pm.requested_power_state_index;
|
||||
int req_cm_idx = rdev->pm.requested_clock_mode_index;
|
||||
struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
|
||||
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
|
||||
|
||||
if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
|
||||
radeon_atom_set_voltage(rdev, voltage->voltage);
|
||||
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
|
||||
if (voltage->voltage != rdev->pm.current_vddc) {
|
||||
radeon_atom_set_voltage(rdev, voltage->voltage);
|
||||
rdev->pm.current_vddc = voltage->voltage;
|
||||
DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -644,6 +644,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
|
|||
ret = copy_from_user(cmd, user_cmd, arg->command_size);
|
||||
|
||||
if (unlikely(ret != 0)) {
|
||||
ret = -EFAULT;
|
||||
DRM_ERROR("Failed copying commands.\n");
|
||||
goto out_commit;
|
||||
}
|
||||
|
|
|
@ -597,8 +597,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
|
|||
|
||||
ret = copy_from_user(srf->sizes, user_sizes,
|
||||
srf->num_sizes * sizeof(*srf->sizes));
|
||||
if (unlikely(ret != 0))
|
||||
if (unlikely(ret != 0)) {
|
||||
ret = -EFAULT;
|
||||
goto out_err1;
|
||||
}
|
||||
|
||||
if (srf->scanout &&
|
||||
srf->num_sizes == 1 &&
|
||||
|
@ -697,9 +699,11 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
|
|||
if (user_sizes)
|
||||
ret = copy_to_user(user_sizes, srf->sizes,
|
||||
srf->num_sizes * sizeof(*srf->sizes));
|
||||
if (unlikely(ret != 0))
|
||||
if (unlikely(ret != 0)) {
|
||||
DRM_ERROR("copy_to_user failed %p %u\n",
|
||||
user_sizes, srf->num_sizes);
|
||||
ret = -EFAULT;
|
||||
}
|
||||
out_bad_resource:
|
||||
out_no_reference:
|
||||
ttm_base_object_unref(&base);
|
||||
|
|
|
@ -678,7 +678,7 @@ static const struct file_operations hp_sdc_rtc_fops = {
|
|||
.llseek = no_llseek,
|
||||
.read = hp_sdc_rtc_read,
|
||||
.poll = hp_sdc_rtc_poll,
|
||||
.unlocked_ioctl = hp_sdc_rtc_ioctl,
|
||||
.unlocked_ioctl = hp_sdc_rtc_unlocked_ioctl,
|
||||
.open = hp_sdc_rtc_open,
|
||||
.fasync = hp_sdc_rtc_fasync,
|
||||
};
|
||||
|
|
|
@ -1020,12 +1020,12 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
|
|||
if (cmd == AVMB1_ADDCARD) {
|
||||
if ((retval = copy_from_user(&cdef, data,
|
||||
sizeof(avmb1_carddef))))
|
||||
return retval;
|
||||
return -EFAULT;
|
||||
cdef.cardtype = AVM_CARDTYPE_B1;
|
||||
} else {
|
||||
if ((retval = copy_from_user(&cdef, data,
|
||||
sizeof(avmb1_extcarddef))))
|
||||
return retval;
|
||||
return -EFAULT;
|
||||
}
|
||||
cparams.port = cdef.port;
|
||||
cparams.irq = cdef.irq;
|
||||
|
@ -1218,7 +1218,7 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
|
|||
kcapi_carddef cdef;
|
||||
|
||||
if ((retval = copy_from_user(&cdef, data, sizeof(cdef))))
|
||||
return retval;
|
||||
return -EFAULT;
|
||||
|
||||
cparams.port = cdef.port;
|
||||
cparams.irq = cdef.irq;
|
||||
|
|
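The capi hunks above fix a classic mistake: copy_from_user() returns the number of bytes it failed to copy, not an errno, so returning that value leaked a positive count to userspace. A small self-contained sketch of the corrected shape; struct card_def and fetch_card_def are illustrative stand-ins, not the driver's names:

#include <linux/uaccess.h>
#include <linux/errno.h>

struct card_def { int port, irq; };	/* stand-in for avmb1_carddef */

static int fetch_card_def(void __user *data, struct card_def *cdef)
{
	/* nonzero means some bytes were left uncopied; report it as -EFAULT */
	if (copy_from_user(cdef, data, sizeof(*cdef)))
		return -EFAULT;
	return 0;
}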
|
@ -249,7 +249,7 @@ config MMC_IMX
|
|||
|
||||
config MMC_MSM7X00A
|
||||
tristate "Qualcomm MSM 7X00A SDCC Controller Support"
|
||||
depends on MMC && ARCH_MSM
|
||||
depends on MMC && ARCH_MSM && !ARCH_MSM7X30
|
||||
help
|
||||
This provides support for the SD/MMC cell found in the
|
||||
MSM 7X00A controllers from Qualcomm.
|
||||
|
|
|
@ -404,14 +404,9 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
|
|||
if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
|
||||
return -EINVAL;
|
||||
|
||||
ops.oobbuf = kmalloc(length, GFP_KERNEL);
|
||||
if (!ops.oobbuf)
|
||||
return -ENOMEM;
|
||||
|
||||
if (copy_from_user(ops.oobbuf, ptr, length)) {
|
||||
kfree(ops.oobbuf);
|
||||
return -EFAULT;
|
||||
}
|
||||
ops.oobbuf = memdup_user(ptr, length);
|
||||
if (IS_ERR(ops.oobbuf))
|
||||
return PTR_ERR(ops.oobbuf);
|
||||
|
||||
start &= ~((uint64_t)mtd->oobsize - 1);
|
||||
ret = mtd->write_oob(mtd, start, &ops);
|
||||
|
|
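memdup_user() folds the kmalloc + copy_from_user + error-unwind sequence removed above into one call that returns either the buffer or an ERR_PTR. A short sketch of the calling convention, with a hypothetical write_user_blob() wrapper; note the caller still owns and frees the buffer:

#include <linux/string.h>	/* memdup_user() */
#include <linux/err.h>
#include <linux/slab.h>

static int write_user_blob(const void __user *ptr, size_t length)
{
	void *buf = memdup_user(ptr, length);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... hand buf to the device, as write_oob does above ... */

	kfree(buf);			/* ownership stays with the caller */
	return 0;
}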
|
@ -1,13 +1,3 @@
|
|||
menuconfig MTD_NAND
|
||||
tristate "NAND Device Support"
|
||||
depends on MTD
|
||||
select MTD_NAND_IDS
|
||||
select MTD_NAND_ECC
|
||||
help
|
||||
This enables support for accessing all types of NAND flash
|
||||
devices. For further information see
|
||||
<http://www.linux-mtd.infradead.org/doc/nand.html>.
|
||||
|
||||
config MTD_NAND_ECC
|
||||
tristate
|
||||
|
||||
|
@ -19,6 +9,17 @@ config MTD_NAND_ECC_SMC
|
|||
Software ECC according to the Smart Media Specification.
|
||||
The original Linux implementation had byte 0 and 1 swapped.
|
||||
|
||||
|
||||
menuconfig MTD_NAND
|
||||
tristate "NAND Device Support"
|
||||
depends on MTD
|
||||
select MTD_NAND_IDS
|
||||
select MTD_NAND_ECC
|
||||
help
|
||||
This enables support for accessing all types of NAND flash
|
||||
devices. For further information see
|
||||
<http://www.linux-mtd.infradead.org/doc/nand.html>.
|
||||
|
||||
if MTD_NAND
|
||||
|
||||
config MTD_NAND_VERIFY_WRITE
|
||||
|
|
|
@ -150,7 +150,6 @@ static void r852_dma_done(struct r852_device *dev, int error)
|
|||
if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
|
||||
pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
|
||||
dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
|
||||
complete(&dev->dma_done);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -182,6 +181,7 @@ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
|
|||
/* Set dma direction */
|
||||
dev->dma_dir = do_read;
|
||||
dev->dma_stage = 1;
|
||||
INIT_COMPLETION(dev->dma_done);
|
||||
|
||||
dbg_verbose("doing dma %s ", do_read ? "read" : "write");
|
||||
|
||||
|
@ -494,6 +494,11 @@ int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
|
|||
if (dev->card_unstable)
|
||||
return 0;
|
||||
|
||||
if (dev->dma_error) {
|
||||
dev->dma_error = 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
|
||||
ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
|
||||
r852_write_reg(dev, R852_CTL, dev->ctlreg);
|
||||
|
@ -707,6 +712,7 @@ void r852_card_detect_work(struct work_struct *work)
|
|||
container_of(work, struct r852_device, card_detect_work.work);
|
||||
|
||||
r852_card_update_present(dev);
|
||||
r852_update_card_detect(dev);
|
||||
dev->card_unstable = 0;
|
||||
|
||||
/* False alarm */
|
||||
|
@ -722,7 +728,6 @@ void r852_card_detect_work(struct work_struct *work)
|
|||
else
|
||||
r852_unregister_nand_device(dev);
|
||||
exit:
|
||||
/* Update detection logic */
|
||||
r852_update_card_detect(dev);
|
||||
}
|
||||
|
||||
|
@ -796,6 +801,7 @@ static irqreturn_t r852_irq(int irq, void *data)
|
|||
if (dma_status & R852_DMA_IRQ_ERROR) {
|
||||
dbg("recieved dma error IRQ");
|
||||
r852_dma_done(dev, -EIO);
|
||||
complete(&dev->dma_done);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -825,8 +831,10 @@ static irqreturn_t r852_irq(int irq, void *data)
|
|||
r852_dma_enable(dev);
|
||||
|
||||
/* Operation done */
|
||||
if (dev->dma_stage == 3)
|
||||
if (dev->dma_stage == 3) {
|
||||
r852_dma_done(dev, 0);
|
||||
complete(&dev->dma_done);
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -940,18 +948,19 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
|
|||
|
||||
r852_dma_test(dev);
|
||||
|
||||
dev->irq = pci_dev->irq;
|
||||
spin_lock_init(&dev->irqlock);
|
||||
|
||||
dev->card_detected = 0;
|
||||
r852_card_update_present(dev);
|
||||
|
||||
/*register irq handler*/
|
||||
error = -ENODEV;
|
||||
if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
|
||||
DRV_NAME, dev))
|
||||
goto error10;
|
||||
|
||||
dev->irq = pci_dev->irq;
|
||||
spin_lock_init(&dev->irqlock);
|
||||
|
||||
/* kick initial present test */
|
||||
dev->card_detected = 0;
|
||||
r852_card_update_present(dev);
|
||||
queue_delayed_work(dev->card_workqueue,
|
||||
&dev->card_detect_work, 0);
|
||||
|
||||
|
@ -1081,7 +1090,7 @@ int r852_resume(struct device *device)
|
|||
dev->card_detected ? "added" : "removed");
|
||||
|
||||
queue_delayed_work(dev->card_workqueue,
|
||||
&dev->card_detect_work, 1000);
|
||||
&dev->card_detect_work, msecs_to_jiffies(1000));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
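queue_delayed_work() takes its delay in jiffies, so the bare 1000 removed above only meant one second on HZ=1000 kernels. The conversion the hunk adds is the usual idiom:

	queue_delayed_work(dev->card_workqueue, &dev->card_detect_work,
			   msecs_to_jiffies(1000));	/* one second regardless of HZ */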
|
@ -598,8 +598,8 @@ rx_next:
|
|||
goto rx_status_loop;
|
||||
|
||||
spin_lock_irqsave(&cp->lock, flags);
|
||||
cpw16_f(IntrMask, cp_intr_mask);
|
||||
__napi_complete(napi);
|
||||
cpw16_f(IntrMask, cp_intr_mask);
|
||||
spin_unlock_irqrestore(&cp->lock, flags);
|
||||
}
|
||||
|
||||
|
|
|
@ -860,6 +860,7 @@ retry:
|
|||
}
|
||||
|
||||
/* if unknown chip, assume array element #0, original RTL-8139 in this case */
|
||||
i = 0;
|
||||
dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n");
|
||||
dev_dbg(&pdev->dev, "TxConfig = 0x%lx\n", RTL_R32 (TxConfig));
|
||||
tp->chipset = 0;
|
||||
|
@ -2088,8 +2089,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
|
|||
* again when we think we are done.
|
||||
*/
|
||||
spin_lock_irqsave(&tp->lock, flags);
|
||||
RTL_W16_F(IntrMask, rtl8139_intr_mask);
|
||||
__napi_complete(napi);
|
||||
RTL_W16_F(IntrMask, rtl8139_intr_mask);
|
||||
spin_unlock_irqrestore(&tp->lock, flags);
|
||||
}
|
||||
spin_unlock(&tp->rx_lock);
|
||||
|
|
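Both RTL8139 hunks above swap two lines so the poll routine leaves NAPI before the interrupt mask is restored, all under the same lock. A generic epilogue in that order, with placeholder names (priv, enable_device_interrupts); presumably the point is that an interrupt arriving the instant the mask is cleared then sees NAPI already completed and can reschedule it safely:

	if (work_done < budget) {
		spin_lock_irqsave(&priv->lock, flags);
		__napi_complete(napi);			/* leave polling state first */
		enable_device_interrupts(priv);		/* hypothetical mask write */
		spin_unlock_irqrestore(&priv->lock, flags);
	}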
|
@ -247,6 +247,7 @@ static const struct flash_spec flash_5709 = {
|
|||
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
|
||||
|
||||
static void bnx2_init_napi(struct bnx2 *bp);
|
||||
static void bnx2_del_napi(struct bnx2 *bp);
|
||||
|
||||
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
|
||||
{
|
||||
|
@ -6270,6 +6271,7 @@ open_err:
|
|||
bnx2_free_skbs(bp);
|
||||
bnx2_free_irq(bp);
|
||||
bnx2_free_mem(bp);
|
||||
bnx2_del_napi(bp);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -6537,6 +6539,7 @@ bnx2_close(struct net_device *dev)
|
|||
bnx2_free_irq(bp);
|
||||
bnx2_free_skbs(bp);
|
||||
bnx2_free_mem(bp);
|
||||
bnx2_del_napi(bp);
|
||||
bp->link_up = 0;
|
||||
netif_carrier_off(bp->dev);
|
||||
bnx2_set_power_state(bp, PCI_D3hot);
|
||||
|
@ -8227,7 +8230,16 @@ bnx2_bus_string(struct bnx2 *bp, char *str)
|
|||
return str;
|
||||
}
|
||||
|
||||
static void __devinit
|
||||
static void
|
||||
bnx2_del_napi(struct bnx2 *bp)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < bp->irq_nvecs; i++)
|
||||
netif_napi_del(&bp->bnx2_napi[i].napi);
|
||||
}
|
||||
|
||||
static void
|
||||
bnx2_init_napi(struct bnx2 *bp)
|
||||
{
|
||||
int i;
|
||||
|
|
|
@ -2554,7 +2554,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
|
|||
mdef = er32(MDEF(i));
|
||||
|
||||
/* Ignore filters with anything other than IPMI ports */
|
||||
if (mdef & !(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
|
||||
if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
|
||||
continue;
|
||||
|
||||
/* Enable this decision filter in MANC2H */
|
||||
|
|
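The one-character e1000e fix above changes a logical NOT into a bitwise NOT. With "!", !(PORT_623 | PORT_664) evaluates to 0, so the test is never true and no filter is ever skipped; with "~", any bits outside the two IPMI ports make the test true, which is what the "Ignore filters with anything other than IPMI ports" comment intends. A tiny illustration with a hypothetical helper:

#include <linux/types.h>

static bool has_non_ipmi_bits(u32 mdef, u32 ipmi_port_bits)
{
	return (mdef & ~ipmi_port_bits) != 0;	/* bits outside the allowed set */
}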
|
@ -74,7 +74,14 @@ struct enic_msix_entry {
|
|||
void *devid;
|
||||
};
|
||||
|
||||
#define ENIC_SET_APPLIED (1 << 0)
|
||||
#define ENIC_SET_REQUEST (1 << 1)
|
||||
#define ENIC_SET_NAME (1 << 2)
|
||||
#define ENIC_SET_INSTANCE (1 << 3)
|
||||
#define ENIC_SET_HOST (1 << 4)
|
||||
|
||||
struct enic_port_profile {
|
||||
u32 set;
|
||||
u8 request;
|
||||
char name[PORT_PROFILE_MAX];
|
||||
u8 instance_uuid[PORT_UUID_MAX];
|
||||
|
|
|
@ -1029,8 +1029,7 @@ static int enic_dev_init_done(struct enic *enic, int *done, int *error)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
|
||||
char *name, u8 *instance_uuid, u8 *host_uuid)
|
||||
static int enic_set_port_profile(struct enic *enic, u8 *mac)
|
||||
{
|
||||
struct vic_provinfo *vp;
|
||||
u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
|
||||
|
@ -1040,97 +1039,112 @@ static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
|
|||
"%02X%02X-%02X%02X%02X%02X%0X%02X";
|
||||
int err;
|
||||
|
||||
if (!name)
|
||||
return -EINVAL;
|
||||
|
||||
if (!is_valid_ether_addr(mac))
|
||||
return -EADDRNOTAVAIL;
|
||||
|
||||
vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_LINUX_TYPE);
|
||||
if (!vp)
|
||||
return -ENOMEM;
|
||||
|
||||
vic_provinfo_add_tlv(vp,
|
||||
VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
|
||||
strlen(name) + 1, name);
|
||||
|
||||
vic_provinfo_add_tlv(vp,
|
||||
VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
|
||||
ETH_ALEN, mac);
|
||||
|
||||
if (instance_uuid) {
|
||||
uuid = instance_uuid;
|
||||
sprintf(uuid_str, uuid_fmt,
|
||||
uuid[0], uuid[1], uuid[2], uuid[3],
|
||||
uuid[4], uuid[5], uuid[6], uuid[7],
|
||||
uuid[8], uuid[9], uuid[10], uuid[11],
|
||||
uuid[12], uuid[13], uuid[14], uuid[15]);
|
||||
vic_provinfo_add_tlv(vp,
|
||||
VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
|
||||
sizeof(uuid_str), uuid_str);
|
||||
}
|
||||
|
||||
if (host_uuid) {
|
||||
uuid = host_uuid;
|
||||
sprintf(uuid_str, uuid_fmt,
|
||||
uuid[0], uuid[1], uuid[2], uuid[3],
|
||||
uuid[4], uuid[5], uuid[6], uuid[7],
|
||||
uuid[8], uuid[9], uuid[10], uuid[11],
|
||||
uuid[12], uuid[13], uuid[14], uuid[15]);
|
||||
vic_provinfo_add_tlv(vp,
|
||||
VIC_LINUX_PROV_TLV_HOST_UUID_STR,
|
||||
sizeof(uuid_str), uuid_str);
|
||||
}
|
||||
|
||||
err = enic_vnic_dev_deinit(enic);
|
||||
if (err)
|
||||
goto err_out;
|
||||
return err;
|
||||
|
||||
memset(&enic->pp, 0, sizeof(enic->pp));
|
||||
switch (enic->pp.request) {
|
||||
|
||||
err = enic_dev_init_prov(enic, vp);
|
||||
if (err)
|
||||
goto err_out;
|
||||
case PORT_REQUEST_ASSOCIATE:
|
||||
|
||||
enic->pp.request = request;
|
||||
memcpy(enic->pp.name, name, PORT_PROFILE_MAX);
|
||||
if (instance_uuid)
|
||||
memcpy(enic->pp.instance_uuid,
|
||||
instance_uuid, PORT_UUID_MAX);
|
||||
if (host_uuid)
|
||||
memcpy(enic->pp.host_uuid,
|
||||
host_uuid, PORT_UUID_MAX);
|
||||
if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
|
||||
return -EINVAL;
|
||||
|
||||
err_out:
|
||||
vic_provinfo_free(vp);
|
||||
if (!is_valid_ether_addr(mac))
|
||||
return -EADDRNOTAVAIL;
|
||||
|
||||
return err;
|
||||
}
|
||||
vp = vic_provinfo_alloc(GFP_KERNEL, oui,
|
||||
VIC_PROVINFO_LINUX_TYPE);
|
||||
if (!vp)
|
||||
return -ENOMEM;
|
||||
|
||||
static int enic_unset_port_profile(struct enic *enic)
|
||||
{
|
||||
memset(&enic->pp, 0, sizeof(enic->pp));
|
||||
return enic_vnic_dev_deinit(enic);
|
||||
vic_provinfo_add_tlv(vp,
|
||||
VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
|
||||
strlen(enic->pp.name) + 1, enic->pp.name);
|
||||
|
||||
vic_provinfo_add_tlv(vp,
|
||||
VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
|
||||
ETH_ALEN, mac);
|
||||
|
||||
if (enic->pp.set & ENIC_SET_INSTANCE) {
|
||||
uuid = enic->pp.instance_uuid;
|
||||
sprintf(uuid_str, uuid_fmt,
|
||||
uuid[0], uuid[1], uuid[2], uuid[3],
|
||||
uuid[4], uuid[5], uuid[6], uuid[7],
|
||||
uuid[8], uuid[9], uuid[10], uuid[11],
|
||||
uuid[12], uuid[13], uuid[14], uuid[15]);
|
||||
vic_provinfo_add_tlv(vp,
|
||||
VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
|
||||
sizeof(uuid_str), uuid_str);
|
||||
}
|
||||
|
||||
if (enic->pp.set & ENIC_SET_HOST) {
|
||||
uuid = enic->pp.host_uuid;
|
||||
sprintf(uuid_str, uuid_fmt,
|
||||
uuid[0], uuid[1], uuid[2], uuid[3],
|
||||
uuid[4], uuid[5], uuid[6], uuid[7],
|
||||
uuid[8], uuid[9], uuid[10], uuid[11],
|
||||
uuid[12], uuid[13], uuid[14], uuid[15]);
|
||||
vic_provinfo_add_tlv(vp,
|
||||
VIC_LINUX_PROV_TLV_HOST_UUID_STR,
|
||||
sizeof(uuid_str), uuid_str);
|
||||
}
|
||||
|
||||
err = enic_dev_init_prov(enic, vp);
|
||||
vic_provinfo_free(vp);
|
||||
if (err)
|
||||
return err;
|
||||
break;
|
||||
|
||||
case PORT_REQUEST_DISASSOCIATE:
|
||||
break;
|
||||
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
enic->pp.set |= ENIC_SET_APPLIED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int enic_set_vf_port(struct net_device *netdev, int vf,
|
||||
struct nlattr *port[])
|
||||
{
|
||||
struct enic *enic = netdev_priv(netdev);
|
||||
char *name = NULL;
|
||||
u8 *instance_uuid = NULL;
|
||||
u8 *host_uuid = NULL;
|
||||
u8 request = PORT_REQUEST_DISASSOCIATE;
|
||||
|
||||
memset(&enic->pp, 0, sizeof(enic->pp));
|
||||
|
||||
if (port[IFLA_PORT_REQUEST]) {
|
||||
enic->pp.set |= ENIC_SET_REQUEST;
|
||||
enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
|
||||
}
|
||||
|
||||
if (port[IFLA_PORT_PROFILE]) {
|
||||
enic->pp.set |= ENIC_SET_NAME;
|
||||
memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
|
||||
PORT_PROFILE_MAX);
|
||||
}
|
||||
|
||||
if (port[IFLA_PORT_INSTANCE_UUID]) {
|
||||
enic->pp.set |= ENIC_SET_INSTANCE;
|
||||
memcpy(enic->pp.instance_uuid,
|
||||
nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
|
||||
}
|
||||
|
||||
if (port[IFLA_PORT_HOST_UUID]) {
|
||||
enic->pp.set |= ENIC_SET_HOST;
|
||||
memcpy(enic->pp.host_uuid,
|
||||
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
|
||||
}
|
||||
|
||||
/* don't support VFs, yet */
|
||||
if (vf != PORT_SELF_VF)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (port[IFLA_PORT_REQUEST])
|
||||
request = nla_get_u8(port[IFLA_PORT_REQUEST]);
|
||||
if (!(enic->pp.set & ENIC_SET_REQUEST))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
switch (request) {
|
||||
case PORT_REQUEST_ASSOCIATE:
|
||||
if (enic->pp.request == PORT_REQUEST_ASSOCIATE) {
|
||||
|
||||
/* If the interface mac addr hasn't been assigned,
|
||||
* assign a random mac addr before setting port-
|
||||
|
@ -1139,30 +1153,9 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
|
|||
|
||||
if (is_zero_ether_addr(netdev->dev_addr))
|
||||
random_ether_addr(netdev->dev_addr);
|
||||
|
||||
if (port[IFLA_PORT_PROFILE])
|
||||
name = nla_data(port[IFLA_PORT_PROFILE]);
|
||||
|
||||
if (port[IFLA_PORT_INSTANCE_UUID])
|
||||
instance_uuid =
|
||||
nla_data(port[IFLA_PORT_INSTANCE_UUID]);
|
||||
|
||||
if (port[IFLA_PORT_HOST_UUID])
|
||||
host_uuid = nla_data(port[IFLA_PORT_HOST_UUID]);
|
||||
|
||||
return enic_set_port_profile(enic, request,
|
||||
netdev->dev_addr, name,
|
||||
instance_uuid, host_uuid);
|
||||
|
||||
case PORT_REQUEST_DISASSOCIATE:
|
||||
|
||||
return enic_unset_port_profile(enic);
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return -EOPNOTSUPP;
|
||||
return enic_set_port_profile(enic, netdev->dev_addr);
|
||||
}
|
||||
|
||||
static int enic_get_vf_port(struct net_device *netdev, int vf,
|
||||
|
@ -1172,14 +1165,12 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
|
|||
int err, error, done;
|
||||
u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
|
||||
|
||||
/* don't support VFs, yet */
|
||||
if (vf != PORT_SELF_VF)
|
||||
return -EOPNOTSUPP;
|
||||
if (!(enic->pp.set & ENIC_SET_APPLIED))
|
||||
return -ENODATA;
|
||||
|
||||
err = enic_dev_init_done(enic, &done, &error);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
error = err;
|
||||
|
||||
switch (error) {
|
||||
case ERR_SUCCESS:
|
||||
|
@ -1202,12 +1193,15 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
|
|||
|
||||
NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
|
||||
NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
|
||||
NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
|
||||
enic->pp.name);
|
||||
NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
|
||||
enic->pp.instance_uuid);
|
||||
NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
|
||||
enic->pp.host_uuid);
|
||||
if (enic->pp.set & ENIC_SET_NAME)
|
||||
NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
|
||||
enic->pp.name);
|
||||
if (enic->pp.set & ENIC_SET_INSTANCE)
|
||||
NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
|
||||
enic->pp.instance_uuid);
|
||||
if (enic->pp.set & ENIC_SET_HOST)
|
||||
NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
|
||||
enic->pp.host_uuid);
|
||||
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -87,6 +87,7 @@ static int rx_copybreak;
|
|||
#include <linux/bitops.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
/* These identify the driver base version and may not be removed. */
|
||||
static char version[] __devinitdata =
|
||||
|
@ -230,7 +231,7 @@ static const u16 media2miictl[16] = {
|
|||
* The EPIC100 Rx and Tx buffer descriptors. Note that these
|
||||
* really ARE host-endian; it's not a misannotation. We tell
|
||||
* the card to byteswap them internally on big-endian hosts -
|
||||
* look for #ifdef CONFIG_BIG_ENDIAN in epic_open().
|
||||
* look for #ifdef __BIG_ENDIAN in epic_open().
|
||||
*/
|
||||
|
||||
struct epic_tx_desc {
|
||||
|
@ -690,7 +691,7 @@ static int epic_open(struct net_device *dev)
|
|||
outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
|
||||
|
||||
/* Tell the chip to byteswap descriptors on big-endian hosts */
|
||||
#ifdef CONFIG_BIG_ENDIAN
|
||||
#ifdef __BIG_ENDIAN
|
||||
outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
|
||||
inl(ioaddr + GENCTL);
|
||||
outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
|
||||
|
@ -806,7 +807,7 @@ static void epic_restart(struct net_device *dev)
|
|||
for (i = 16; i > 0; i--)
|
||||
outl(0x0008, ioaddr + TEST1);
|
||||
|
||||
#ifdef CONFIG_BIG_ENDIAN
|
||||
#ifdef __BIG_ENDIAN
|
||||
outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
|
||||
#else
|
||||
outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
|
||||
|
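The epic100 hunks switch the byteswap setup from "#ifdef CONFIG_BIG_ENDIAN" to "#ifdef __BIG_ENDIAN" and add <asm/byteorder.h>, which in the kernel defines __BIG_ENDIAN only on big-endian builds, so the test now selects the right GENCTL value. A sketch of the test as epic_restart() uses it; the two constants are taken from the hunk, the GENCTL_INIT macro is illustrative:

#include <asm/byteorder.h>

#ifdef __BIG_ENDIAN
#define GENCTL_INIT	0x0432		/* chip byteswaps descriptors internally */
#else
#define GENCTL_INIT	0x0412
#endif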
|
|
@ -1373,10 +1373,9 @@ fec_suspend(struct platform_device *dev, pm_message_t state)
|
|||
|
||||
if (ndev) {
|
||||
fep = netdev_priv(ndev);
|
||||
if (netif_running(ndev)) {
|
||||
netif_device_detach(ndev);
|
||||
fec_stop(ndev);
|
||||
}
|
||||
if (netif_running(ndev))
|
||||
fec_enet_close(ndev);
|
||||
clk_disable(fep->clk);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1385,12 +1384,13 @@ static int
|
|||
fec_resume(struct platform_device *dev)
|
||||
{
|
||||
struct net_device *ndev = platform_get_drvdata(dev);
|
||||
struct fec_enet_private *fep;
|
||||
|
||||
if (ndev) {
|
||||
if (netif_running(ndev)) {
|
||||
fec_enet_init(ndev, 0);
|
||||
netif_device_attach(ndev);
|
||||
}
|
||||
fep = netdev_priv(ndev);
|
||||
clk_enable(fep->clk);
|
||||
if (netif_running(ndev))
|
||||
fec_enet_open(ndev);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -747,8 +747,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
|
|||
FSL_GIANFAR_DEV_HAS_CSUM |
|
||||
FSL_GIANFAR_DEV_HAS_VLAN |
|
||||
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
|
||||
FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
|
||||
FSL_GIANFAR_DEV_HAS_TIMER;
|
||||
FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
|
||||
|
||||
ctype = of_get_property(np, "phy-connection-type", NULL);
|
||||
|
||||
|
|
|
@@ -1188,6 +1188,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 	} else {
 		hw_dbg(hw, "RAR index %d is out of range.\n", index);
+		return IXGBE_ERR_RAR_INDEX;
 	}

 	return 0;

@@ -1219,6 +1220,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
 		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 	} else {
 		hw_dbg(hw, "RAR index %d is out of range.\n", index);
+		return IXGBE_ERR_RAR_INDEX;
 	}

 	/* clear VMDq pool/queue selection for this RAR */

@@ -642,7 +642,7 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
 	u32 txoff = IXGBE_TFCS_TXOFF;

 #ifdef CONFIG_IXGBE_DCB
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+	if (adapter->dcb_cfg.pfc_mode_enable) {
 		int tc;
 		int reg_idx = tx_ring->reg_idx;
 		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

@@ -2609,6 +2609,7 @@ struct ixgbe_info {
 #define IXGBE_ERR_EEPROM_VERSION -24
 #define IXGBE_ERR_NO_SPACE -25
 #define IXGBE_ERR_OVERTEMP -26
+#define IXGBE_ERR_RAR_INDEX -27
 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF

 #endif /* _IXGBE_TYPE_H_ */
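
Both RAR hunks turn the out-of-range case from a debug message followed by a successful return into a hard failure, backed by the new IXGBE_ERR_RAR_INDEX code added in ixgbe_type.h. The shape of that fix, reduced to a stand-alone function (table size and names are illustrative only):

/* Stand-alone sketch: reject an out-of-range index instead of only logging it. */
#include <stdio.h>

#define NUM_RAR_ENTRIES 128             /* illustrative table size */
#define ERR_RAR_INDEX   (-27)           /* mirrors the new IXGBE_ERR_RAR_INDEX value */

static unsigned int rar_table[NUM_RAR_ENTRIES];

static int set_rar(unsigned int index, unsigned int value)
{
	if (index >= NUM_RAR_ENTRIES) {
		fprintf(stderr, "RAR index %u is out of range.\n", index);
		return ERR_RAR_INDEX;   /* previously the caller still saw success */
	}
	rar_table[index] = value;
	return 0;
}

int main(void)
{
	return set_rar(200, 0xdeadbeef) == ERR_RAR_INDEX ? 0 : 1;
}
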
@@ -135,6 +135,7 @@ struct korina_private {
 	struct napi_struct napi;
 	struct timer_list media_check_timer;
 	struct mii_if_info mii_if;
+	struct work_struct restart_task;
 	struct net_device *dev;
 	int phy_addr;
 };

@@ -375,7 +376,7 @@ static int korina_rx(struct net_device *dev, int limit)
 			if (devcs & ETH_RX_LE)
 				dev->stats.rx_length_errors++;
 			if (devcs & ETH_RX_OVR)
-				dev->stats.rx_over_errors++;
+				dev->stats.rx_fifo_errors++;
 			if (devcs & ETH_RX_CV)
 				dev->stats.rx_frame_errors++;
 			if (devcs & ETH_RX_CES)

@@ -764,10 +765,9 @@ static int korina_alloc_ring(struct net_device *dev)

 	/* Initialize the receive descriptors */
 	for (i = 0; i < KORINA_NUM_RDS; i++) {
-		skb = dev_alloc_skb(KORINA_RBSIZE + 2);
+		skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
 		if (!skb)
 			return -ENOMEM;
-		skb_reserve(skb, 2);
 		lp->rx_skb[i] = skb;
 		lp->rd_ring[i].control = DMA_DESC_IOD |
 				DMA_COUNT(KORINA_RBSIZE);
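
netdev_alloc_skb_ip_align() bundles what the old korina code spelled out by hand: allocate the RX buffer with two spare bytes and reserve them so the IP header ends up word-aligned. A sketch of the equivalence (rx_alloc_old/rx_alloc_new are made-up names; NET_IP_ALIGN is the symbolic form of the literal 2 used above):

/* Sketch: the two RX allocations below end up with the same buffer layout. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *rx_alloc_old(struct net_device *dev, unsigned int rx_size)
{
	struct sk_buff *skb = dev_alloc_skb(rx_size + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN); /* shift data so the IP header lands aligned */
	return skb;
}

static struct sk_buff *rx_alloc_new(struct net_device *dev, unsigned int rx_size)
{
	/* adds NET_IP_ALIGN and does the skb_reserve() internally, and sets skb->dev */
	return netdev_alloc_skb_ip_align(dev, rx_size);
}
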
@@ -890,12 +890,12 @@ static int korina_init(struct net_device *dev)

 /*
  * Restart the RC32434 ethernet controller.
- * FIXME: check the return status where we call it
  */
-static int korina_restart(struct net_device *dev)
+static void korina_restart_task(struct work_struct *work)
 {
-	struct korina_private *lp = netdev_priv(dev);
-	int ret;
+	struct korina_private *lp = container_of(work,
+			struct korina_private, restart_task);
+	struct net_device *dev = lp->dev;

 	/*
 	 * Disable interrupts

@@ -916,10 +916,9 @@ static int korina_restart(struct net_device *dev)

 	napi_disable(&lp->napi);

-	ret = korina_init(dev);
-	if (ret < 0) {
+	if (korina_init(dev) < 0) {
 		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
-		return ret;
+		return;
 	}
 	korina_multicast_list(dev);

@@ -927,8 +926,6 @@ static int korina_restart(struct net_device *dev)
 	enable_irq(lp->ovr_irq);
 	enable_irq(lp->tx_irq);
 	enable_irq(lp->rx_irq);
-
-	return ret;
 }

 static void korina_clear_and_restart(struct net_device *dev, u32 value)

@@ -937,7 +934,7 @@ static void korina_clear_and_restart(struct net_device *dev, u32 value)

 	netif_stop_queue(dev);
 	writel(value, &lp->eth_regs->ethintfc);
-	korina_restart(dev);
+	schedule_work(&lp->restart_task);
 }

 /* Ethernet Tx Underflow interrupt */

@@ -962,11 +959,8 @@ static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
 static void korina_tx_timeout(struct net_device *dev)
 {
 	struct korina_private *lp = netdev_priv(dev);
-	unsigned long flags;

-	spin_lock_irqsave(&lp->lock, flags);
-	korina_restart(dev);
-	spin_unlock_irqrestore(&lp->lock, flags);
+	schedule_work(&lp->restart_task);
 }

 /* Ethernet Rx Overflow interrupt */

@@ -1086,6 +1080,8 @@ static int korina_close(struct net_device *dev)

 	napi_disable(&lp->napi);

+	cancel_work_sync(&lp->restart_task);
+
 	free_irq(lp->rx_irq, dev);
 	free_irq(lp->tx_irq, dev);
 	free_irq(lp->ovr_irq, dev);

@@ -1198,6 +1194,8 @@ static int korina_probe(struct platform_device *pdev)
 	}
 	setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);

+	INIT_WORK(&lp->restart_task, korina_restart_task);
+
 	printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
 			dev->name);
 out:
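
The remaining korina hunks convert the synchronous korina_restart() call, which ran from interrupt handlers and the TX-timeout path, into deferred work: a work_struct in the private data, schedule_work() at the trigger points, and cancel_work_sync() before teardown in close(). A stripped-down sketch of that pattern (xxx_* names are placeholders; the workqueue calls are the real API):

/* Sketch of deferring a device restart from atomic context to a workqueue. */
#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct xxx_priv {
	struct net_device *dev;
	struct work_struct restart_task;
};

static void xxx_restart_task(struct work_struct *work)
{
	struct xxx_priv *priv = container_of(work, struct xxx_priv, restart_task);
	struct net_device *dev = priv->dev;

	/* process context: safe to sleep while re-initializing hardware and rings */
	netif_wake_queue(dev);
}

static void xxx_tx_timeout(struct net_device *dev)      /* runs in atomic context */
{
	struct xxx_priv *priv = netdev_priv(dev);

	schedule_work(&priv->restart_task);              /* just queue the work */
}

static int xxx_close(struct net_device *dev)
{
	struct xxx_priv *priv = netdev_priv(dev);

	cancel_work_sync(&priv->restart_task);           /* no restart racing with teardown */
	return 0;
}

/* in probe: INIT_WORK(&priv->restart_task, xxx_restart_task); */
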
@@ -322,6 +322,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
	return -ENOMEM;
    smc = netdev_priv(dev);
    smc->p_dev = link;
    link->priv = dev;

    spin_lock_init(&smc->lock);
    link->io.NumPorts1 = 16;
@@ -53,6 +53,9 @@

 #define MII_LXT971_ISR 19 /* Interrupt Status Register */

+/* register definitions for the 973 */
+#define MII_LXT973_PCR 16 /* Port Configuration Register */
+#define PCR_FIBER_SELECT 1

 MODULE_DESCRIPTION("Intel LXT PHY driver");
 MODULE_AUTHOR("Andy Fleming");

@@ -119,6 +122,33 @@ static int lxt971_config_intr(struct phy_device *phydev)
 	return err;
 }

+static int lxt973_probe(struct phy_device *phydev)
+{
+	int val = phy_read(phydev, MII_LXT973_PCR);
+
+	if (val & PCR_FIBER_SELECT) {
+		/*
+		 * If fiber is selected, then the only correct setting
+		 * is 100Mbps, full duplex, and auto negotiation off.
+		 */
+		val = phy_read(phydev, MII_BMCR);
+		val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
+		val &= ~BMCR_ANENABLE;
+		phy_write(phydev, MII_BMCR, val);
+		/* Remember that the port is in fiber mode. */
+		phydev->priv = lxt973_probe;
+	} else {
+		phydev->priv = NULL;
+	}
+	return 0;
+}
+
+static int lxt973_config_aneg(struct phy_device *phydev)
+{
+	/* Do nothing if port is in fiber mode. */
+	return phydev->priv ? 0 : genphy_config_aneg(phydev);
+}
+
 static struct phy_driver lxt970_driver = {
 	.phy_id = 0x78100000,
 	.name = "LXT970",

@@ -146,6 +176,18 @@ static struct phy_driver lxt971_driver = {
 	.driver = { .owner = THIS_MODULE,},
 };

+static struct phy_driver lxt973_driver = {
+	.phy_id = 0x00137a10,
+	.name = "LXT973",
+	.phy_id_mask = 0xfffffff0,
+	.features = PHY_BASIC_FEATURES,
+	.flags = 0,
+	.probe = lxt973_probe,
+	.config_aneg = lxt973_config_aneg,
+	.read_status = genphy_read_status,
+	.driver = { .owner = THIS_MODULE,},
+};
+
 static int __init lxt_init(void)
 {
 	int ret;

@@ -157,9 +199,15 @@
 	ret = phy_driver_register(&lxt971_driver);
 	if (ret)
 		goto err2;
+
+	ret = phy_driver_register(&lxt973_driver);
+	if (ret)
+		goto err3;
 	return 0;

-err2:
+err3:
+	phy_driver_unregister(&lxt971_driver);
+err2:
 	phy_driver_unregister(&lxt970_driver);
 err1:
 	return ret;

@@ -169,6 +217,7 @@ static void __exit lxt_exit(void)
 {
 	phy_driver_unregister(&lxt970_driver);
 	phy_driver_unregister(&lxt971_driver);
+	phy_driver_unregister(&lxt973_driver);
 }

 module_init(lxt_init);
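
lxt_init() now registers three drivers and unwinds them in reverse order on failure, with the labels arranged so that jumping to a given errN label undoes exactly the registrations that succeeded before step N. The same control flow, reduced to a stand-alone C program with stand-in register/unregister functions:

/* Stand-alone demo of the goto-unwind ordering used in lxt_init(). */
#include <stdio.h>

static int register_a(void) { puts("register a"); return 0; }
static int register_b(void) { puts("register b"); return 0; }
static int register_c(void) { puts("register c"); return -1; }  /* pretend this one fails */
static void unregister_a(void) { puts("unregister a"); }
static void unregister_b(void) { puts("unregister b"); }

static int init_all(void)
{
	int ret;

	ret = register_a();
	if (ret)
		goto err1;
	ret = register_b();
	if (ret)
		goto err2;
	ret = register_c();
	if (ret)
		goto err3;
	return 0;

err3:
	unregister_b();   /* undo b, then fall through to undo a */
err2:
	unregister_a();
err1:
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
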
@@ -1422,7 +1422,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 		flen = len;
 		if (nfree > 0) {
 			if (pch->speed == 0) {
-				flen = totlen/nfree;
+				flen = len/nfree;
 				if (nbigger > 0) {
 					flen++;
 					nbigger--;
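
The one-line ppp_mp_explode() fix divides the bytes still to be sent (len) rather than the whole packet (totlen) across the nfree channels currently able to take a fragment, with nbigger of them getting one extra byte so the pieces add up exactly. A stand-alone check of that arithmetic for the equal-speed case (values arbitrary; nbigger is modelled here as len % nfree):

/* Stand-alone check: len split over nfree channels, nbigger of them one byte bigger. */
#include <stdio.h>

int main(void)
{
	int len = 1499;          /* bytes still to send (arbitrary) */
	int nfree = 4;           /* channels currently able to take a fragment */
	int nbigger = len % nfree;
	int sum = 0;

	for (int i = 0; i < nfree; i++) {
		int flen = len / nfree;
		if (nbigger > 0) {
			flen++;          /* same adjustment as the hunk above */
			nbigger--;
		}
		printf("fragment %d: %d bytes\n", i, flen);
		sum += flen;
	}
	printf("total: %d (len = %d)\n", sum, len);
	return sum == len ? 0 : 1;
}
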
Some files were not shown because too many files have changed in this diff.