Rename pinctrl dt binding to restore consistency with other bcm mobile bindings.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.14 (GNU/Linux)
 
 iQIcBAABAgAGBQJTDB0VAAoJEPYb7NoJr+nLwGkQALuroLtwGGq0/6Jw4dr9n5UO
 tclAvSbuvUoqwu58cD3grWhkavFKQqR4fJdWcrmczROD9r6rX48QDFe2jNlRjgj/
 yNKkTsd+Ay9I0tPeyx3Buf+tVeQgqsSuG7FJPKgAxMf3HT+tmaWpdIMsjcLOwzdi
 ZMTpIw0cLGMdtr8OSHW/nJuaQV3b+DhnjSLC8QIRg3XgJGjNzJxiKu2Flp0w261P
 Ubzs/PDv83Xl5d972e6JoYR5gGzMwIskm64LmEbHiigLJsvo71oHW57Q2g9asx7h
 stiBzAWF7/Q1xDQ0hTukIOIbX94juR9zSaje66qZ578CFiRAYefginCfyIak9R6R
 Jk0X3krsc0bU9hi4maiYEMVXO3R7K7Rv4BcdoXkg38BXNyINfpdUZlb/5ds6o2FJ
 AZaBgt+/3dl649YaX+ft9VW+1oeV4Hj89sn2pH56NV/rC+dzmh3EHExyHFPJ+sO+
 KKaoXVPf7+TyStrwSxXPSdpUlmGWRWEn9NF1JIGtGNBU+hbiXw7TEOD6DaC8TAN0
 kqAC3ba1YwJalR5rr6UC8ZG2dIZMv3BF7efrM+ewcV7w7F5KF/suJmYIGF+hAFfJ
 Ki7YRRUMS2bVroFjQ7haFGkcwS4JzQnR0hO4O4rNu9JbgBWs0Zv5idj1G/EFpT7a
 hM4ZETYQQb27LVicK/dN
 =2dxs
 -----END PGP SIGNATURE-----

Merge tag 'bcm-for-3.14-pinctrl-reduced-rename' of git://github.com/broadcom/bcm11351 into fixes

Merge 'bcm pinctrl rename' from Christian Daudt:

Rename pinctrl dt binding to restore consistency with other bcm mobile
bindings.

* tag 'bcm-for-3.14-pinctrl-reduced-rename' of git://github.com/broadcom/bcm11351:
  pinctrl: Rename Broadcom Capri pinctrl binding
  pinctrl: refer to updated dt binding string.
  Update dtsi with new pinctrl compatible string
  + Linux 3.14-rc4

Signed-off-by: Olof Johansson <olof@lixom.net>
Olof Johansson 2014-03-08 22:11:16 -08:00
commit 4058f76247
352 changed files with 3374 additions and 2149 deletions


@ -3,8 +3,7 @@ Date: Nov 2010
Contact: Kay Sievers <kay.sievers@vrfy.org>
Description:
Shows the list of currently configured
tty devices used for the console,
like 'tty1 ttyS0'.
console devices, like 'tty1 ttyS0'.
The last entry in the file is the active
device connected to /dev/console.
The file supports poll() to detect virtual


@ -82,7 +82,19 @@ Most of the hard work is done for the driver in the PCI layer. It simply
has to request that the PCI layer set up the MSI capability for this
device.
4.2.1 pci_enable_msi_range
4.2.1 pci_enable_msi
int pci_enable_msi(struct pci_dev *dev)
A successful call allocates ONE interrupt to the device, regardless
of how many MSIs the device supports. The device is switched from
pin-based interrupt mode to MSI mode. The dev->irq number is changed
to a new number which represents the message signaled interrupt;
consequently, this function should be called before the driver calls
request_irq(), because an MSI is delivered via a vector that is
different from the vector of a pin-based interrupt.
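As a rough sketch of the ordering described above (the foo_* names are hypothetical, in the style of the other examples in this document), a driver that is satisfied with a single vector might do:

static int foo_driver_setup_single_msi(struct pci_dev *pdev, struct foo_adapter *adapter)
{
	int rc;

	rc = pci_enable_msi(pdev);
	if (rc)
		return rc;	/* could fall back to pin-based INTx here */

	/* pdev->irq now refers to the MSI vector, so the IRQ is requested
	   only after pci_enable_msi() has succeeded */
	rc = request_irq(pdev->irq, foo_interrupt_handler, 0, "foo", adapter);
	if (rc)
		pci_disable_msi(pdev);

	return rc;
}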
4.2.2 pci_enable_msi_range
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
@ -147,6 +159,11 @@ static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
return pci_enable_msi_range(pdev, nvec, nvec);
}
Note that, unlike the pci_enable_msi_exact() function, which can also be used to
enable a particular number of MSI interrupts, pci_enable_msi_range()
returns either a negative errno or 'nvec' (not a negative errno or 0, as
pci_enable_msi_exact() does).
4.2.1.3 Single MSI mode
The most notorious example of the request type described above is
@ -158,7 +175,27 @@ static int foo_driver_enable_single_msi(struct pci_dev *pdev)
return pci_enable_msi_range(pdev, 1, 1);
}
4.2.2 pci_disable_msi
Note that, unlike the pci_enable_msi() function, which can also be used to
enable single MSI mode, pci_enable_msi_range() returns either a
negative errno or 1 (not a negative errno or 0, as pci_enable_msi()
does).
4.2.3 pci_enable_msi_exact
int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
This variation on pci_enable_msi_range() call allows a device driver to
request exactly 'nvec' MSIs.
If this function returns a negative number, it indicates an error and
the driver should not attempt to request any more MSI interrupts for
this device.
By contrast with pci_enable_msi_range() function, pci_enable_msi_exact()
returns zero in case of success, which indicates MSI interrupts have been
successfully allocated.
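For instance (a sketch only; the 'foo' prefix and the fixed count of four vectors are made up for illustration), a driver that strictly requires four MSI vectors could use:

static int foo_driver_enable_msi_exact(struct pci_dev *pdev)
{
	/* returns 0 only if exactly four vectors were allocated,
	   otherwise a negative errno */
	return pci_enable_msi_exact(pdev, 4);
}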
4.2.4 pci_disable_msi
void pci_disable_msi(struct pci_dev *dev)
@ -172,7 +209,7 @@ on any interrupt for which it previously called request_irq().
Failure to do so results in a BUG_ON(), leaving the device with
MSI enabled and thus leaking its vector.
4.2.3 pci_msi_vec_count
4.2.4 pci_msi_vec_count
int pci_msi_vec_count(struct pci_dev *dev)
@ -257,7 +294,7 @@ possible, likely up to the limit returned by pci_msix_vec_count() function:
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1, nvec);
}
@ -269,7 +306,7 @@ In this case the function could look like this:
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
FOO_DRIVER_MINIMUM_NVEC, nvec);
}
@ -282,10 +319,15 @@ parameters:
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
nvec, nvec);
}
Note that, unlike the pci_enable_msix_exact() function, which can also be used to
enable a particular number of MSI-X interrupts, pci_enable_msix_range()
returns either a negative errno or 'nvec' (not a negative errno or 0, as
pci_enable_msix_exact() does).
4.3.1.3 Specific requirements to the number of MSI-X interrupts
As noted above, there could be devices that can not operate with just any
@ -332,7 +374,64 @@ Note how pci_enable_msix_range() return value is analyzed for a fallback -
any error code other than -ENOSPC indicates a fatal error and should not
be retried.
4.3.2 pci_disable_msix
4.3.2 pci_enable_msix_exact
int pci_enable_msix_exact(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
This variation on pci_enable_msix_range() call allows a device driver to
request exactly 'nvec' MSI-Xs.
If this function returns a negative number, it indicates an error and
the driver should not attempt to allocate any more MSI-X interrupts for
this device.
By contrast with pci_enable_msix_range() function, pci_enable_msix_exact()
returns zero in case of success, which indicates MSI-X interrupts have been
successfully allocated.
Another version of a routine that enables MSI-X mode for a device with
specific requirements described in chapter 4.3.1.3 might look like this:
/*
* Assume 'minvec' and 'maxvec' are non-zero
*/
static int foo_driver_enable_msix(struct foo_adapter *adapter,
int minvec, int maxvec)
{
int rc;
minvec = roundup_pow_of_two(minvec);
maxvec = rounddown_pow_of_two(maxvec);
if (minvec > maxvec)
return -ERANGE;
retry:
rc = pci_enable_msix_exact(adapter->pdev,
adapter->msix_entries, maxvec);
/*
* -ENOSPC is the only error code allowed to be analyzed
*/
if (rc == -ENOSPC) {
if (maxvec == 1)
return -ENOSPC;
maxvec /= 2;
if (minvec > maxvec)
return -ENOSPC;
goto retry;
} else if (rc < 0) {
return rc;
}
return maxvec;
}
4.3.3 pci_disable_msix
void pci_disable_msix(struct pci_dev *dev)


@ -0,0 +1,58 @@
STMicroelectronics SoC DWMAC glue layer controller
The device node has following properties.
Required properties:
- compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac" or
"st,stid127-dwmac".
- reg : Offset and size of the glue configuration register map within the
system configuration regmap pointed to by the st,syscon property.
- reg-names : Should be "sti-ethconf".
- st,syscon : Should be a phandle to the system configuration node which
encompasses these glue registers.
- st,tx-retime-src: On STi parts, for gigabit speeds the 125MHz clock can be
wired up from different sources: one via the TXCLK pin and the other via the
CLK_125 pin. This wiring is entirely board dependent; however, the retiming
glue logic should be configured accordingly. Possible values for this property:
"txclk" - if the 125MHz clock is wired up via the txclk line.
"clk_125" - if the 125MHz clock is wired up via the clk_125 line.
This property is only valid for gigabit setups (GMII, RGMII) and is unused
for non-gigabit (MII and RMII) setups. Also note that the internal clockgen
cannot generate a stable 125MHz clock.
- st,ext-phyclk: This boolean property indicates whether the MAC or the PHY
generates the tx and rx clock. It is only valid for the RMII case, where the
clock can be generated by either the MAC or the PHY.
- clock-names: should be "sti-ethclk".
- clocks: Should point to ethernet clockgen which can generate phyclk.
Example:
ethernet0: dwmac@fe810000 {
device_type = "network";
compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
reg = <0xfe810000 0x8000>, <0x8bc 0x4>;
reg-names = "stmmaceth", "sti-ethconf";
interrupts = <0 133 0>, <0 134 0>, <0 135 0>;
interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
phy-mode = "mii";
st,syscon = <&syscfg_rear>;
snps,pbl = <32>;
snps,mixed-burst;
resets = <&softreset STIH416_ETH0_SOFTRESET>;
reset-names = "stmmaceth";
pinctrl-0 = <&pinctrl_mii0>;
pinctrl-names = "default";
clocks = <&CLK_S_GMAC0_PHY>;
clock-names = "stmmaceth";
};


@ -1,4 +1,4 @@
Broadcom Capri Pin Controller
Broadcom BCM281xx Pin Controller
This is a pin controller for the Broadcom BCM281xx SoC family, which includes
BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
@ -7,14 +7,14 @@ BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
Required Properties:
- compatible: Must be "brcm,capri-pinctrl".
- compatible: Must be "brcm,bcm11351-pinctrl"
- reg: Base address of the PAD Controller register block and the size
of the block.
For example, the following is the bare minimum node:
pinctrl@35004800 {
compatible = "brcm,capri-pinctrl";
compatible = "brcm,bcm11351-pinctrl";
reg = <0x35004800 0x430>;
};
@ -119,7 +119,7 @@ Optional Properties (for HDMI pins):
Example:
// pin controller node
pinctrl@35004800 {
compatible = "brcm,capri-pinctrl";
compatible = "brcmbcm11351-pinctrl";
reg = <0x35004800 0x430>;
// pin configuration node


@ -1,45 +0,0 @@
The 3Com Etherlink Plus (3c505) driver.
This driver now uses DMA. There is currently no support for PIO operation.
The default DMA channel is 6; this is _not_ autoprobed, so you must
make sure you configure it correctly. If loading the driver as a
module, you can do this with "modprobe 3c505 dma=n". If the driver is
linked statically into the kernel, you must either use an "ether="
statement on the command line, or change the definition of ELP_DMA in 3c505.h.
The driver will warn you if it has to fall back on the compiled in
default DMA channel.
If no base address is given at boot time, the driver will autoprobe
ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver
will try to probe for it.
The driver can be used as a loadable module.
Theoretically, one instance of the driver can now run multiple cards,
in the standard way (when loading a module, say "modprobe 3c505
io=0x300,0x340 irq=10,11 dma=6,7" or whatever). I have not tested
this, though.
The driver may now support revision 2 hardware; the dependency on
being able to read the host control register has been removed. This
is also untested, since I don't have a suitable card.
Known problems:
I still see "DMA upload timed out" messages from time to time. These
seem to be fairly non-fatal though.
The card is old and slow.
To do:
Improve probe/setup code
Test multicast and promiscuous operation
Authors:
The driver is mainly written by Craig Southeren, email
<craigs@ineluki.apana.org.au>.
Parts of the driver (adapting the driver to 1.1.4+ kernels,
IRQ/address detection, some changes) and this README by
Juha Laiho <jlaiho@ichaos.nullnet.fi>.
DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk>
Multicard support, Software configurable DMA, etc., by
Christopher Collins <ccollins@pcug.org.au>


@ -2409,8 +2409,10 @@ F: tools/power/cpupower/
CPUSETS
M: Li Zefan <lizefan@huawei.com>
L: cgroups@vger.kernel.org
W: http://www.bullopensource.org/cpuset/
W: http://oss.sgi.com/projects/cpusets/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
S: Maintained
F: Documentation/cgroups/cpusets.txt
F: include/linux/cpuset.h
@ -3325,6 +3327,17 @@ S: Maintained
F: include/linux/netfilter_bridge/
F: net/bridge/
ETHERNET PHY LIBRARY
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: include/linux/phy.h
F: include/linux/phy_fixed.h
F: drivers/net/phy/
F: Documentation/networking/phy.txt
F: drivers/of/of_mdio.c
F: drivers/of/of_net.c
EXT2 FILE SYSTEM
M: Jan Kara <jack@suse.cz>
L: linux-ext4@vger.kernel.org
@ -9716,7 +9729,6 @@ F: drivers/xen/*swiotlb*
XFS FILESYSTEM
P: Silicon Graphics Inc
M: Dave Chinner <david@fromorbit.com>
M: Ben Myers <bpm@sgi.com>
M: xfs@oss.sgi.com
L: xfs@oss.sgi.com
W: http://oss.sgi.com/projects/xfs


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*


@ -147,7 +147,7 @@
};
pinctrl@35004800 {
compatible = "brcm,capri-pinctrl";
compatible = "brcm,bcm11351-pinctrl";
reg = <0x35004800 0x430>;
};


@ -52,12 +52,6 @@
};
};
codec: spdif-transmitter {
compatible = "linux,spdif-dit";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hummingboard_spdif>;
};
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
@ -111,7 +105,7 @@
};
pinctrl_hummingboard_spdif: hummingboard-spdif {
fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>;
fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
};
pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
@ -142,6 +136,8 @@
};
&spdif {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hummingboard_spdif>;
status = "okay";
};


@ -46,12 +46,6 @@
};
};
codec: spdif-transmitter {
compatible = "linux,spdif-dit";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_cubox_i_spdif>;
};
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
@ -89,7 +83,7 @@
};
pinctrl_cubox_i_spdif: cubox-i-spdif {
fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>;
fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
};
pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
@ -121,6 +115,8 @@
};
&spdif {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_cubox_i_spdif>;
status = "okay";
};


@ -1,2 +0,0 @@
/include/ "tests-phandle.dtsi"
/include/ "tests-interrupts.dtsi"


@ -1,4 +1,4 @@
/include/ "versatile-ab.dts"
#include <versatile-ab.dts>
/ {
model = "ARM Versatile PB";
@ -47,4 +47,4 @@
};
};
/include/ "testcases/tests.dtsi"
#include <testcases.dtsi>


@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
dsb();
}
/*


@ -120,9 +120,12 @@
/*
* 2nd stage PTE definitions for LPAE.
*/
#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */


@ -37,18 +37,9 @@
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
__asm__ __volatile__ (
"dsb ishst\n"
SEV
);
#else
__asm__ __volatile__ (
"mcr p15, 0, %0, c7, c10, 4\n"
SEV
: : "r" (0)
);
#endif
dsb(ishst);
__asm__(SEV);
}
/*


@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
kernel_data.end = virt_to_phys(_end - 1);
for_each_memblock(memory, region) {
res = memblock_virt_alloc_low(sizeof(*res), 0);
res = memblock_virt_alloc(sizeof(*res), 0);
res->name = "System RAM";
res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;


@ -24,6 +24,7 @@
#include <linux/cpu_pm.h>
#include <linux/suspend.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/clk/tegra.h>
#include <asm/smp_plat.h>


@ -1358,7 +1358,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
if (gfp & GFP_ATOMIC)
if (!(gfp & __GFP_WAIT))
return __iommu_alloc_atomic(dev, size, handle);
/*


@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
struct mem_type {
pteval_t prot_pte;
pteval_t prot_pte_s2;
pmdval_t prot_l1;
pmdval_t prot_sect;
unsigned int domain;


@ -232,12 +232,16 @@ __setup("noalign", noalign_setup);
#endif /* ifdef CONFIG_CPU_CP15 / else */
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
.prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
s2_policy(L_PTE_S2_MT_DEV_SHARED) |
L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
@ -508,7 +512,8 @@ static void __init build_mem_type_table(void)
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
s2_pgprot = cp->pte_s2;
hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
/*
* ARMv6 and above have extended page tables.


@ -208,7 +208,6 @@ __v6_setup:
mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
@ -218,6 +217,8 @@ __v6_setup:
ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
mcr p15, 0, r8, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
@ complete invalidations
adr r5, v6_crval
ldmia r5, {r5, r6}
ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables


@ -351,7 +351,6 @@ __v7_setup:
4: mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
dsb
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
@ -360,6 +359,7 @@ __v7_setup:
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
dsb @ Complete invalidations
#ifndef CONFIG_ARM_THUMBEE
mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
and r0, r0, #(0xf << 12) @ ThumbEE enabled field


@ -11,7 +11,7 @@ all: uImage vmlinux.elf
KBUILD_DEFCONFIG := atstk1002_defconfig
KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic
KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__
KBUILD_AFLAGS += -mrelax -mno-pic
KBUILD_CFLAGS_MODULE += -mno-relax
LDFLAGS_vmlinux += --relax


@ -11,6 +11,7 @@
#define FRAM_VERSION "1.0"
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/io.h>


@ -17,5 +17,6 @@ generic-y += scatterlist.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += vga.h
generic-y += xor.h
generic-y += hash.h


@ -295,6 +295,8 @@ extern void __iounmap(void __iomem *addr);
#define iounmap(addr) \
__iounmap(addr)
#define ioremap_wc ioremap_nocache
#define cached(addr) P1SEGADDR(addr)
#define uncached(addr) P2SEGADDR(addr)


@ -172,10 +172,20 @@ struct eeh_ops {
};
extern struct eeh_ops *eeh_ops;
extern int eeh_subsystem_enabled;
extern bool eeh_subsystem_enabled;
extern raw_spinlock_t confirm_error_lock;
extern int eeh_probe_mode;
static inline bool eeh_enabled(void)
{
return eeh_subsystem_enabled;
}
static inline void eeh_set_enable(bool mode)
{
eeh_subsystem_enabled = mode;
}
#define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */
#define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */
@ -246,7 +256,7 @@ void eeh_remove_device(struct pci_dev *);
* If this macro yields TRUE, the caller relays to eeh_check_failure()
* which does further tests out of line.
*/
#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled)
#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled())
/*
* Reads from a device which has been isolated by EEH will return
@ -257,6 +267,13 @@ void eeh_remove_device(struct pci_dev *);
#else /* !CONFIG_EEH */
static inline bool eeh_enabled(void)
{
return false;
}
static inline void eeh_set_enable(bool mode) { }
static inline int eeh_init(void)
{
return 0;


@ -127,7 +127,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
return __pte(pte_update(ptep, ~0UL, 0));
#endif


@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, unsigned long clr,
unsigned long set,
int huge)
{
#ifdef PTE_ATOMIC_UPDATES
@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm,
andi. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
#else
unsigned long old = pte_val(*ptep);
*ptep = __pte(old & ~clr);
*ptep = __pte((old & ~clr) | set);
#endif
/* huge pages use the old page table lock */
if (!huge)
@ -233,7 +235,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
pte_update(mm, addr, ptep, _PAGE_RW, 0);
pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
pte_update(mm, addr, ptep, _PAGE_RW, 1);
pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}
/*
@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
return __pte(old);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t * ptep)
{
pte_update(mm, addr, ptep, ~0UL, 0);
pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
@ -506,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma,
extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
unsigned long addr,
pmd_t *pmdp, unsigned long clr);
pmd_t *pmdp,
unsigned long clr,
unsigned long set);
static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
@ -515,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED);
old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
return ((old & _PAGE_ACCESSED) != 0);
}
@ -542,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
return;
pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW);
pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
}
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH


@ -75,12 +75,34 @@ static inline pte_t pte_mknuma(pte_t pte)
return pte;
}
#define ptep_set_numa ptep_set_numa
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
VM_BUG_ON(1);
pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
return;
}
#define pmd_numa pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
return pte_numa(pmd_pte(pmd));
}
#define pmdp_set_numa pmdp_set_numa
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
VM_BUG_ON(1);
pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
return;
}
#define pmd_mknonnuma pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{


@ -4,11 +4,11 @@
#ifdef __KERNEL__
/* Default link addresses for the vDSOs */
#define VDSO32_LBASE 0x100000
#define VDSO64_LBASE 0x100000
#define VDSO32_LBASE 0x0
#define VDSO64_LBASE 0x0
/* Default map addresses for 32bit vDSO */
#define VDSO32_MBASE VDSO32_LBASE
#define VDSO32_MBASE 0x100000
#define VDSO_VERSION_STRING LINUX_2.6.15


@ -28,6 +28,7 @@
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/export.h>
@ -89,7 +90,7 @@
/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;
int eeh_subsystem_enabled;
bool eeh_subsystem_enabled = false;
EXPORT_SYMBOL(eeh_subsystem_enabled);
/*
@ -364,7 +365,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
eeh_stats.total_mmio_ffs++;
if (!eeh_subsystem_enabled)
if (!eeh_enabled())
return 0;
if (!edev) {
@ -747,6 +748,17 @@ int __exit eeh_ops_unregister(const char *name)
return -EEXIST;
}
static int eeh_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
eeh_set_enable(false);
return NOTIFY_DONE;
}
static struct notifier_block eeh_reboot_nb = {
.notifier_call = eeh_reboot_notifier,
};
/**
* eeh_init - EEH initialization
*
@ -778,6 +790,14 @@ int eeh_init(void)
if (machine_is(powernv) && cnt++ <= 0)
return ret;
/* Register reboot notifier */
ret = register_reboot_notifier(&eeh_reboot_nb);
if (ret) {
pr_warn("%s: Failed to register notifier (%d)\n",
__func__, ret);
return ret;
}
/* call platform initialization function */
if (!eeh_ops) {
pr_warning("%s: Platform EEH operation not found\n",
@ -822,7 +842,7 @@ int eeh_init(void)
return ret;
}
if (eeh_subsystem_enabled)
if (eeh_enabled())
pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
else
pr_warning("EEH: No capable adapters found\n");
@ -897,7 +917,7 @@ void eeh_add_device_late(struct pci_dev *dev)
struct device_node *dn;
struct eeh_dev *edev;
if (!dev || !eeh_subsystem_enabled)
if (!dev || !eeh_enabled())
return;
pr_debug("EEH: Adding device %s\n", pci_name(dev));
@ -1005,7 +1025,7 @@ void eeh_remove_device(struct pci_dev *dev)
{
struct eeh_dev *edev;
if (!dev || !eeh_subsystem_enabled)
if (!dev || !eeh_enabled())
return;
edev = pci_dev_to_eeh_dev(dev);
@ -1045,7 +1065,7 @@ void eeh_remove_device(struct pci_dev *dev)
static int proc_eeh_show(struct seq_file *m, void *v)
{
if (0 == eeh_subsystem_enabled) {
if (!eeh_enabled()) {
seq_printf(m, "EEH Subsystem is globally disabled\n");
seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
} else {


@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
/*
* void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
*/
_GLOBAL(call_do_irq)
mflr r0
stw r0,4(r1)
lwz r10,THREAD+KSP_LIMIT(r2)
addi r11,r3,THREAD_INFO_GAP
addi r11,r4,THREAD_INFO_GAP
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
mr r1,r4
stw r10,8(r1)


@ -6,7 +6,7 @@
.globl vdso32_start, vdso32_end
.balign PAGE_SIZE
vdso32_start:
.incbin "arch/powerpc/kernel/vdso32/vdso32.so"
.incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
.balign PAGE_SIZE
vdso32_end:


@ -6,7 +6,7 @@
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
vdso64_start:
.incbin "arch/powerpc/kernel/vdso64/vdso64.so"
.incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
.balign PAGE_SIZE
vdso64_end:


@ -510,7 +510,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
}
unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, unsigned long clr)
pmd_t *pmdp, unsigned long clr,
unsigned long set)
{
unsigned long old, tmp;
@ -526,14 +527,15 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
andi. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
: "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY)
: "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
#else
old = pmd_val(*pmdp);
*pmdp = __pmd(old & ~clr);
*pmdp = __pmd((old & ~clr) | set);
#endif
if (old & _PAGE_HASHPTE)
hpte_do_hugepage_flush(mm, addr, pmdp);
@ -708,7 +710,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT);
pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
}
/*
@ -835,7 +837,7 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
unsigned long old;
pgtable_t *pgtable_slot;
old = pmd_hugepage_update(mm, addr, pmdp, ~0UL);
old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
old_pmd = __pmd(old);
/*
* We have pmd == none and we are holding page_table_lock.


@ -78,7 +78,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
arch_enter_lazy_mmu_mode();
for (; npages > 0; --npages) {
pte_update(mm, addr, pte, 0, 0);
pte_update(mm, addr, pte, 0, 0, 0);
addr += PAGE_SIZE;
++pte;
}


@ -44,7 +44,8 @@ static int ioda_eeh_event(struct notifier_block *nb,
/* We simply send special EEH event */
if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
(events & OPAL_EVENT_PCI_ERROR))
(events & OPAL_EVENT_PCI_ERROR) &&
eeh_enabled())
eeh_send_failure_event(NULL);
return 0;
@ -489,8 +490,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose,
static int ioda_eeh_reset(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
struct eeh_dev *edev;
struct pci_dev *dev;
struct pci_bus *bus;
int ret;
/*
@ -519,31 +519,11 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
if (pe->type & EEH_PE_PHB) {
ret = ioda_eeh_phb_reset(hose, option);
} else {
if (pe->type & EEH_PE_DEVICE) {
/*
* If it's device PE, we didn't refer to the parent
* PCI bus yet. So we have to figure it out indirectly.
*/
edev = list_first_entry(&pe->edevs,
struct eeh_dev, list);
dev = eeh_dev_to_pci_dev(edev);
dev = dev->bus->self;
} else {
/*
* If it's bus PE, the parent PCI bus is already there
* and just pick it up.
*/
dev = pe->bus->self;
}
/*
* Do reset based on the fact that the direct upstream bridge
* is root bridge (port) or not.
*/
if (dev->bus->number == 0)
bus = eeh_pe_bus_get(pe);
if (pci_is_root_bus(bus))
ret = ioda_eeh_root_reset(hose, option);
else
ret = ioda_eeh_bridge_reset(hose, dev, option);
ret = ioda_eeh_bridge_reset(hose, bus->self, option);
}
return ret;


@ -145,7 +145,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* Enable EEH explicitly so that we will do EEH check
* while accessing I/O stuff
*/
eeh_subsystem_enabled = 1;
eeh_set_enable(true);
/* Save memory bars */
eeh_save_bars(edev);


@ -265,7 +265,7 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
enable = 1;
if (enable) {
eeh_subsystem_enabled = 1;
eeh_set_enable(true);
eeh_add_to_parent_pe(edev);
pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",


@ -113,7 +113,8 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct device_node *dn, *pdn;
struct pci_bus *bus;
const __be32 *pcie_link_speed_stats;
u32 pcie_link_speed_stats[2];
int rc;
bus = bridge->bus;
@ -122,38 +123,45 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
pcie_link_speed_stats = of_get_property(pdn,
"ibm,pcie-link-speed-stats", NULL);
if (pcie_link_speed_stats)
rc = of_property_read_u32_array(pdn,
"ibm,pcie-link-speed-stats",
&pcie_link_speed_stats[0], 2);
if (!rc)
break;
}
of_node_put(pdn);
if (!pcie_link_speed_stats) {
if (rc) {
pr_err("no ibm,pcie-link-speed-stats property\n");
return 0;
}
switch (be32_to_cpup(pcie_link_speed_stats)) {
switch (pcie_link_speed_stats[0]) {
case 0x01:
bus->max_bus_speed = PCIE_SPEED_2_5GT;
break;
case 0x02:
bus->max_bus_speed = PCIE_SPEED_5_0GT;
break;
case 0x04:
bus->max_bus_speed = PCIE_SPEED_8_0GT;
break;
default:
bus->max_bus_speed = PCI_SPEED_UNKNOWN;
break;
}
switch (be32_to_cpup(pcie_link_speed_stats)) {
switch (pcie_link_speed_stats[1]) {
case 0x01:
bus->cur_bus_speed = PCIE_SPEED_2_5GT;
break;
case 0x02:
bus->cur_bus_speed = PCIE_SPEED_5_0GT;
break;
case 0x04:
bus->cur_bus_speed = PCIE_SPEED_8_0GT;
break;
default:
bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
break;


@ -27,7 +27,7 @@ config SPARC
select RTC_DRV_M48T59
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP


@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
@ -62,6 +63,7 @@ extern unsigned long last_valid_pfn;
static pgd_t *srmmu_swapper_pg_dir;
const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
EXPORT_SYMBOL(sparc32_cachetlb_ops);
#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;


@ -66,6 +66,6 @@ extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);
/* MSR based TSC calibration for Intel Atom SoC platforms */
int try_msr_calibrate_tsc(unsigned long *fast_calibrate);
unsigned long try_msr_calibrate_tsc(void);
#endif /* _ASM_X86_TSC_H */


@ -1521,6 +1521,8 @@ static int __init init_hw_perf_events(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
quirk->func();
@ -1534,7 +1536,6 @@ static int __init init_hw_perf_events(void)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
0, x86_pmu.num_counters, 0, 0);
x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
x86_pmu_format_group.attrs = x86_pmu.format_attrs;
if (x86_pmu.event_attrs)
@ -1820,9 +1821,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
if (ret)
return ret;
if (x86_pmu.attr_rdpmc_broken)
return -ENOTSUPP;
if (!!val != !!x86_pmu.attr_rdpmc) {
x86_pmu.attr_rdpmc = !!val;
smp_call_function(change_rdpmc, (void *)val, 1);
on_each_cpu(change_rdpmc, (void *)val, 1);
}
return count;


@ -409,6 +409,7 @@ struct x86_pmu {
/*
* sysfs attrs
*/
int attr_rdpmc_broken;
int attr_rdpmc;
struct attribute **format_attrs;
struct attribute **event_attrs;


@ -1361,10 +1361,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
if (!status) {
intel_pmu_enable_all(0);
return handled;
}
if (!status)
goto done;
loops = 0;
again:
@ -2310,10 +2308,7 @@ __init int intel_pmu_init(void)
if (version > 1)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
/*
* v2 and above have a perf capabilities MSR
*/
if (version > 1) {
if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities;
rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);


@ -501,8 +501,11 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
@ -1178,10 +1181,15 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),


@ -231,31 +231,49 @@ static __initconst const struct x86_pmu p6_pmu = {
};
static __init void p6_pmu_rdpmc_quirk(void)
{
if (boot_cpu_data.x86_mask < 9) {
/*
* PPro erratum 26; fixed in stepping 9 and above.
*/
pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
x86_pmu.attr_rdpmc_broken = 1;
x86_pmu.attr_rdpmc = 0;
}
}
__init int p6_pmu_init(void)
{
x86_pmu = p6_pmu;
switch (boot_cpu_data.x86_model) {
case 1:
case 3: /* Pentium Pro */
case 5:
case 6: /* Pentium II */
case 7:
case 8:
case 11: /* Pentium III */
case 9:
case 13:
/* Pentium M */
case 1: /* Pentium Pro */
x86_add_quirk(p6_pmu_rdpmc_quirk);
break;
case 3: /* Pentium II - Klamath */
case 5: /* Pentium II - Deschutes */
case 6: /* Pentium II - Mendocino */
break;
case 7: /* Pentium III - Katmai */
case 8: /* Pentium III - Coppermine */
case 10: /* Pentium III Xeon */
case 11: /* Pentium III - Tualatin */
break;
case 9: /* Pentium M - Banias */
case 13: /* Pentium M - Dothan */
break;
default:
pr_cont("unsupported p6 CPU model %d ",
boot_cpu_data.x86_model);
pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model);
return -ENODEV;
}
x86_pmu = p6_pmu;
memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
return 0;
}


@ -100,8 +100,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
flag |= __GFP_ZERO;
again:
page = NULL;
if (!(flag & GFP_ATOMIC))
/* CMA can be used only in the context which permits sleeping */
if (flag & __GFP_WAIT)
page = dma_alloc_from_contiguous(dev, count, get_order(size));
/* fallback */
if (!page)
page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
if (!page)


@ -653,13 +653,10 @@ unsigned long native_calibrate_tsc(void)
/* Calibrate TSC using MSR for Intel Atom SoCs */
local_irq_save(flags);
i = try_msr_calibrate_tsc(&fast_calibrate);
fast_calibrate = try_msr_calibrate_tsc();
local_irq_restore(flags);
if (i >= 0) {
if (i == 0)
pr_warn("Fast TSC calibration using MSR failed\n");
if (fast_calibrate)
return fast_calibrate;
}
local_irq_save(flags);
fast_calibrate = quick_pit_calibrate();


@ -53,7 +53,7 @@ static struct freq_desc freq_desc_tables[] = {
/* TNG */
{ 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } },
/* VLV2 */
{ 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
{ 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
/* ANN */
{ 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
};
@ -77,21 +77,18 @@ static int match_cpu(u8 family, u8 model)
/*
* Do MSR calibration only for known/supported CPUs.
* Return values:
* -1: CPU is unknown/unsupported for MSR based calibration
* 0: CPU is known/supported, but calibration failed
* 1: CPU is known/supported, and calibration succeeded
*
* Returns the calibration value or 0 if MSR calibration failed.
*/
int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
unsigned long try_msr_calibrate_tsc(void)
{
int cpu_index;
u32 lo, hi, ratio, freq_id, freq;
unsigned long res;
int cpu_index;
cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
if (cpu_index < 0)
return -1;
*fast_calibrate = 0;
return 0;
if (freq_desc_tables[cpu_index].msr_plat) {
rdmsr(MSR_PLATFORM_INFO, lo, hi);
@ -103,7 +100,7 @@ int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);
if (!ratio)
return 0;
goto fail;
/* Get FSB FREQ ID */
rdmsr(MSR_FSB_FREQ, lo, hi);
@ -112,16 +109,19 @@ int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
pr_info("Resolved frequency ID: %u, frequency: %u KHz\n",
freq_id, freq);
if (!freq)
return 0;
goto fail;
/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
*fast_calibrate = freq * ratio;
pr_info("TSC runs at %lu KHz\n", *fast_calibrate);
res = freq * ratio;
pr_info("TSC runs at %lu KHz\n", res);
#ifdef CONFIG_X86_LOCAL_APIC
lapic_timer_frequency = (freq * 1000) / HZ;
pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency);
#endif
return res;
return 1;
fail:
pr_warn("Fast TSC calibration using MSR failed\n");
return 0;
}


@ -243,6 +243,8 @@ static int acpi_ac_resume(struct device *dev)
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
return 0;
}
#else
#define acpi_ac_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);


@ -841,6 +841,8 @@ static int acpi_battery_resume(struct device *dev)
acpi_battery_update(battery);
return 0;
}
#else
#define acpi_battery_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);


@ -260,14 +260,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{
.callback = dmi_disable_osi_win8,
.ident = "Dell Inspiron 15R SE",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "ThinkPad Edge E530",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@ -322,56 +314,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "HP ProBook 2013 models",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "HP EliteBook 2013 models",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "HP ZBook 14",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "HP ZBook 15",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "HP ZBook 17",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
},
},
{
.callback = dmi_disable_osi_win8,
.ident = "HP EliteBook 8780w",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
},
},
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.


@ -80,6 +80,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event);
#ifdef CONFIG_PM_SLEEP
static int acpi_button_resume(struct device *dev);
#else
#define acpi_button_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);


@ -713,13 +713,11 @@ static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
static ssize_t show_docked(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *tmp;
struct dock_station *dock_station = dev->platform_data;
struct acpi_device *adev = NULL;
if (!acpi_bus_get_device(dock_station->handle, &tmp))
return snprintf(buf, PAGE_SIZE, "1\n");
return snprintf(buf, PAGE_SIZE, "0\n");
acpi_bus_get_device(dock_station->handle, &adev);
return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
}
static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);


@ -55,6 +55,9 @@ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev);
static int acpi_fan_resume(struct device *dev);
#else
#define acpi_fan_suspend NULL
#define acpi_fan_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);


@ -430,6 +430,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
pin_name(pin));
}
kfree(entry);
return 0;
}


@ -450,7 +450,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
if (sscanf(buf, "%ld\n", &x) == 1)
if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm_capacity = x /
(1000 * acpi_battery_scale(battery));
if (battery->present)
@ -668,6 +668,8 @@ static int acpi_sbs_resume(struct device *dev)
acpi_sbs_callback(sbs);
return 0;
}
#else
#define acpi_sbs_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);


@ -102,6 +102,8 @@ MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_thermal_resume(struct device *dev);
#else
#define acpi_thermal_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);


@ -81,11 +81,12 @@ static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
/*
* For Windows 8 systems: if set true and the GPU driver has
* registered a backlight interface, skip registering ACPI video's.
* For Windows 8 systems: used to decide if video module
* should skip registering backlight interface of its own.
*/
static bool use_native_backlight = false;
module_param(use_native_backlight, bool, 0644);
static int use_native_backlight_param = -1;
module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
static bool use_native_backlight_dmi = false;
static int register_count;
static struct mutex video_list_lock;
@ -231,9 +232,17 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
static int acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
static bool acpi_video_use_native_backlight(void)
{
if (use_native_backlight_param != -1)
return use_native_backlight_param;
else
return use_native_backlight_dmi;
}
static bool acpi_video_verify_backlight_support(void)
{
if (acpi_osi_is_win8() && use_native_backlight &&
if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
backlight_device_registered(BACKLIGHT_RAW))
return false;
return acpi_video_backlight_support();
@ -398,6 +407,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
return 0;
}
static int __init video_set_use_native_backlight(const struct dmi_system_id *d)
{
use_native_backlight_dmi = true;
return 0;
}
static struct dmi_system_id video_dmi_table[] __initdata = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@ -442,6 +457,120 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "ThinkPad T430s",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "ThinkPad X230",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "ThinkPad X1 Carbon",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "Lenovo Yoga 13",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "Dell Inspiron 7520",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "Acer Aspire 5733Z",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "Acer Aspire V5-431",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "HP ProBook 4340s",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "HP ProBook 2013 models",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "HP EliteBook 2013 models",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "HP ZBook 14",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "HP ZBook 15",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "HP ZBook 17",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
},
},
{
.callback = video_set_use_native_backlight,
.ident = "HP EliteBook 8780w",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
},
},
{}
};
@ -685,6 +814,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
union acpi_object *o;
struct acpi_video_device_brightness *br = NULL;
int result = -EINVAL;
u32 value;
if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
@ -715,7 +845,12 @@ acpi_video_init_brightness(struct acpi_video_device *device)
printk(KERN_ERR PREFIX "Invalid data\n");
continue;
}
br->levels[count] = (u32) o->integer.value;
value = (u32) o->integer.value;
/* Skip duplicate entries */
if (count > 2 && br->levels[count - 1] == value)
continue;
br->levels[count] = value;
if (br->levels[count] > max_level)
max_level = br->levels[count];


@ -168,22 +168,6 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
{
.callback = video_detect_force_vendor,
.ident = "HP EliteBook Revolve 810",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"),
},
},
{
.callback = video_detect_force_vendor,
.ident = "Lenovo Yoga 13",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
},
},
{ },
};


@ -247,6 +247,7 @@ config SATA_HIGHBANK
config SATA_MV
tristate "Marvell SATA support"
select GENERIC_PHY
help
This option enables support for the Marvell Serial ATA family.
Currently supports 88SX[56]0[48][01] PCI(-X) chips,


@ -61,6 +61,7 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
board_ahci_noncq,
board_ahci_nosntf,
board_ahci_yes_fbs,
@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_noncq] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
[board_ahci_nosntf] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
@ -452,6 +460,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
/*
* Samsung SSDs found on some macbooks. NCQ times out.
* https://bugzilla.kernel.org/show_bug.cgi?id=60731
*/
{ PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@ -1170,8 +1184,10 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
nvec = rc;
rc = pci_enable_msi_block(pdev, nvec);
if (rc)
if (rc < 0)
goto intx;
else if (rc > 0)
goto single_msi;
return nvec;


@ -447,8 +447,11 @@ static void sata_pmp_quirks(struct ata_port *ap)
* otherwise. Don't try hard to recover it.
*/
ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
} else if (vendor == 0x197b && devid == 0x2352) {
/* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
} else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
/*
* 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
* 0x0325: jmicron JMB394.
*/
ata_for_each_link(link, ap, EDGE) {
/* SRST breaks detection and disks get misclassified
* LPM disabled to avoid potential problems


@ -119,7 +119,9 @@ static int pata_imx_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
@ -212,7 +214,9 @@ static int pata_imx_resume(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct pata_imx_priv *priv = host->private_data;
clk_prepare_enable(priv->clk);
int ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
__raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);


@ -4104,7 +4104,6 @@ static int mv_platform_probe(struct platform_device *pdev)
if (!hpriv->port_phys)
return -ENOMEM;
host->private_data = hpriv;
hpriv->n_ports = n_ports;
hpriv->board_idx = chip_soc;
host->iomap = NULL;
@ -4132,13 +4131,18 @@ static int mv_platform_probe(struct platform_device *pdev)
rc = PTR_ERR(hpriv->port_phys[port]);
hpriv->port_phys[port] = NULL;
if (rc != -EPROBE_DEFER)
dev_warn(&pdev->dev, "error getting phy %d",
rc);
dev_warn(&pdev->dev, "error getting phy %d", rc);
/* Cleanup only the initialized ports */
hpriv->n_ports = port;
goto err;
} else
phy_power_on(hpriv->port_phys[port]);
}
/* All the ports have been initialized */
hpriv->n_ports = n_ports;
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
@ -4176,7 +4180,7 @@ err:
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
for (port = 0; port < n_ports; port++) {
for (port = 0; port < hpriv->n_ports; port++) {
if (!IS_ERR(hpriv->port_clks[port])) {
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);


@ -157,6 +157,7 @@ static const struct sil_drivelist {
{ "ST380011ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
{ "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
{ "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
{ }
};


@ -616,36 +616,35 @@ static int dma_buf_describe(struct seq_file *s)
if (ret)
return ret;
seq_printf(s, "\nDma-buf Objects:\n");
seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");
seq_puts(s, "\nDma-buf Objects:\n");
seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
ret = mutex_lock_interruptible(&buf_obj->lock);
if (ret) {
seq_printf(s,
seq_puts(s,
"\tERROR locking buffer object: skipping\n");
continue;
}
seq_printf(s, "\t");
seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
buf_obj->exp_name, buf_obj->size,
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
(long)(buf_obj->file->f_count.counter));
(long)(buf_obj->file->f_count.counter),
buf_obj->exp_name);
seq_printf(s, "\t\tAttached Devices:\n");
seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
seq_printf(s, "\t\t");
seq_puts(s, "\t");
seq_printf(s, "%s\n", attach_obj->dev->init_name);
seq_printf(s, "%s\n", dev_name(attach_obj->dev));
attach_count++;
}
seq_printf(s, "\n\t\tTotal %d devices attached\n",
seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;


@ -1323,7 +1323,6 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
up_read(&policy->rwsem);
if (cpu != policy->cpu) {
if (!frozen)
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);


@ -36,6 +36,8 @@
#define BYT_RATIOS 0x66a
#define BYT_VIDS 0x66b
#define BYT_TURBO_RATIOS 0x66c
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@ -357,7 +359,7 @@ static int byt_get_min_pstate(void)
{
u64 value;
rdmsrl(BYT_RATIOS, value);
return value & 0xFF;
return (value >> 8) & 0xFF;
}
static int byt_get_max_pstate(void)
@ -367,6 +369,13 @@ static int byt_get_max_pstate(void)
return (value >> 16) & 0xFF;
}
static int byt_get_turbo_pstate(void)
{
u64 value;
rdmsrl(BYT_TURBO_RATIOS, value);
return value & 0x3F;
}
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
u64 val;
@ -469,7 +478,7 @@ static struct cpu_defaults byt_params = {
.funcs = {
.get_max = byt_get_max_pstate,
.get_min = byt_get_min_pstate,
.get_turbo = byt_get_max_pstate,
.get_turbo = byt_get_turbo_pstate,
.set = byt_set_pstate,
.get_vid = byt_get_vid,
},
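
The intel_pstate hunks add a shift-based fixed-point helper (int_tofp with FRAC_BITS of 8), read the Baytrail minimum P-state from bits 15:8 of BYT_RATIOS instead of bits 7:0, and take the turbo ratio from a separate MSR. A user-space sketch of the two ideas; the register value below is fabricated, not a real MSR image:

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8
#define int_tofp(x) ((int64_t)(x) << FRAC_BITS)

int main(void)
{
	uint64_t ratios = 0x2f25;                /* fabricated MSR-style value */

	int min_pstate = (ratios >> 8) & 0xff;   /* bits 15:8, as in the fix */
	int old_field  = ratios & 0xff;          /* bits 7:0, what the old code read */

	printf("min pstate: %d (old code would have read %d)\n",
	       min_pstate, old_field);
	printf("42 as 8-bit fixed point: %lld (42 << %d)\n",
	       (long long)int_tofp(42), FRAC_BITS);
	return 0;
}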


@ -1076,7 +1076,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data;
struct init_on_cpu init_on_cpu;
int rc;
int rc, cpu;
smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
if (rc)
@ -1140,7 +1140,9 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
per_cpu(powernow_data, pol->cpu) = data;
/* Point all the CPUs in this policy to the same data */
for_each_cpu(cpu, pol->cpus)
per_cpu(powernow_data, cpu) = data;
return 0;
@ -1155,6 +1157,7 @@ err_out:
static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
int cpu;
if (!data)
return -EINVAL;
@ -1165,7 +1168,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
kfree(data->powernow_table);
kfree(data);
per_cpu(powernow_data, pol->cpu) = NULL;
for_each_cpu(cpu, pol->cpus)
per_cpu(powernow_data, cpu) = NULL;
return 0;
}
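
The powernow-k8 hunks make every CPU in a frequency policy point at the same driver data, and clear all of those pointers again on exit, instead of only touching pol->cpu. A user-space sketch of the shared-data-per-policy idea; powernow_data, policy_cpus and NCPUS are invented for the demo:

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

static int *powernow_data[NCPUS];

static int cpu_init(const int *policy_cpus, int n)
{
	int *data = malloc(sizeof(*data));
	if (!data)
		return -1;
	*data = 1800000;                           /* pretend: current kHz */
	for (int i = 0; i < n; i++)
		powernow_data[policy_cpus[i]] = data;  /* all CPUs -> same data */
	return 0;
}

static void cpu_exit(const int *policy_cpus, int n)
{
	free(powernow_data[policy_cpus[0]]);
	for (int i = 0; i < n; i++)
		powernow_data[policy_cpus[i]] = NULL;  /* clear every alias */
}

int main(void)
{
	int policy_cpus[] = { 0, 1 };              /* two CPUs share one policy */

	if (cpu_init(policy_cpus, 2))
		return 1;
	printf("cpu1 sees %d kHz via shared data\n", *powernow_data[1]);
	cpu_exit(policy_cpus, 2);
	return 0;
}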


@ -296,6 +296,18 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_ASYNC_PAGE_FLIP:
req->value = dev->mode_config.async_page_flip;
break;
case DRM_CAP_CURSOR_WIDTH:
if (dev->mode_config.cursor_width)
req->value = dev->mode_config.cursor_width;
else
req->value = 64;
break;
case DRM_CAP_CURSOR_HEIGHT:
if (dev->mode_config.cursor_height)
req->value = dev->mode_config.cursor_height;
else
req->value = 64;
break;
default:
return -EINVAL;
}
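
The new DRM_CAP_CURSOR_WIDTH/HEIGHT cases report the driver-provided cursor dimensions and fall back to 64 when the driver leaves them at zero. A tiny sketch of that zero-means-legacy-default pattern; report_cursor_dim() is a made-up name:

#include <stdio.h>

/* 0 means "driver did not say"; report the historical 64 instead. */
static unsigned int report_cursor_dim(unsigned int configured)
{
	return configured ? configured : 64;
}

int main(void)
{
	printf("configured 256 -> %u\n", report_cursor_dim(256));
	printf("configured   0 -> %u\n", report_cursor_dim(0));
	return 0;
}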


@ -1151,8 +1151,10 @@ tda998x_encoder_init(struct i2c_client *client,
priv->current_page = 0xff;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
if (!priv->cec)
if (!priv->cec) {
kfree(priv);
return -ENODEV;
}
priv->dpms = DRM_MODE_DPMS_OFF;
encoder_slave->slave_priv = priv;
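
The tda998x hunk frees the just-allocated priv before bailing out when the dummy I2C client cannot be created; the old code returned -ENODEV and leaked it. A hedged user-space sketch of the free-what-you-allocated-before-returning pattern; make_dummy_client() and probe_device() are invented stand-ins for i2c_new_dummy() and the driver init:

#include <stdio.h>
#include <stdlib.h>

static void *make_dummy_client(int available)
{
	return available ? malloc(1) : NULL;   /* stand-in for i2c_new_dummy() */
}

static int probe_device(int cec_available)
{
	char *priv = malloc(64);
	if (!priv)
		return -1;

	void *cec = make_dummy_client(cec_available);
	if (!cec) {
		free(priv);        /* the fix: release what was already allocated */
		return -1;
	}

	/* ... normal setup would continue here ... */
	free(cec);
	free(priv);
	return 0;
}

int main(void)
{
	printf("probe with cec:    %d\n", probe_device(1));
	printf("probe without cec: %d\n", probe_device(0));
	return 0;
}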


@ -8586,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
if (ring->id == RCS)
len += 6;
/*
* BSpec MI_DISPLAY_FLIP for IVB:
* "The full packet must be contained within the same cache line."
*
* Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
* cacheline, if we ever start emitting more commands before
* the MI_DISPLAY_FLIP we may need to first emit everything else,
* then do the cacheline alignment, and finally emit the
* MI_DISPLAY_FLIP.
*/
ret = intel_ring_cacheline_align(ring);
if (ret)
goto err_unpin;
ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;


@ -537,6 +537,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
uint8_t msg[20];
int msg_bytes;
uint8_t ack;
int retry;
if (WARN_ON(send_bytes > 16))
return -E2BIG;
@ -548,19 +549,21 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
msg[3] = send_bytes - 1;
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
ack >>= 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
break;
return send_bytes;
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
usleep_range(400, 500);
else
return -EIO;
}
return send_bytes;
DRM_ERROR("too many retries, giving up\n");
return -EIO;
}
/* Write a single byte to the aux channel in native mode */
@ -582,6 +585,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
int reply_bytes;
uint8_t ack;
int ret;
int retry;
if (WARN_ON(recv_bytes > 19))
return -E2BIG;
@ -595,7 +599,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
msg_bytes = 4;
reply_bytes = recv_bytes + 1;
for (;;) {
for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
@ -608,10 +612,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
return ret - 1;
}
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
udelay(100);
usleep_range(400, 500);
else
return -EIO;
}
DRM_ERROR("too many retries, giving up\n");
return -EIO;
}
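
Both AUX paths above used to loop forever when the sink kept answering DP_AUX_NATIVE_REPLY_DEFER; the patch bounds them at seven attempts and sleeps 400-500 us between tries instead of a 100 us busy delay. A user-space sketch of the bounded retry-on-defer loop; the reply codes and transfer() below are invented, not the DRM API:

#include <stdio.h>
#include <unistd.h>

enum reply { REPLY_ACK, REPLY_DEFER, REPLY_NAK };

/* Pretend the sink defers a few times before acking. */
static enum reply transfer(int attempt)
{
	return attempt < 3 ? REPLY_DEFER : REPLY_ACK;
}

static int aux_write(void)
{
	for (int retry = 0; retry < 7; retry++) {
		switch (transfer(retry)) {
		case REPLY_ACK:
			return 0;            /* done */
		case REPLY_DEFER:
			usleep(450);         /* roughly 400-500us, as in the patch */
			break;               /* try again */
		default:
			return -1;           /* hard failure */
		}
	}
	fprintf(stderr, "too many retries, giving up\n");
	return -1;
}

int main(void)
{
	printf("aux_write() = %d\n", aux_write());
	return 0;
}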
static int


@ -1653,6 +1653,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
return 0;
}
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
{
int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
int ret;
if (num_dwords == 0)
return 0;
ret = intel_ring_begin(ring, num_dwords);
if (ret)
return ret;
while (num_dwords--)
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
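
intel_ring_cacheline_align() pads the ring tail up to the next 64-byte boundary with MI_NOOPs so that the LRI+SRM+MI_DISPLAY_FLIP packet emitted afterwards cannot straddle a cache line (the BSpec constraint quoted in the display hunk earlier). The arithmetic is just "how many 4-byte dwords remain until the next multiple of 64"; a standalone sketch of that calculation with fabricated tail values:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the new helper: how many 4-byte no-ops it takes to
 * move 'tail' up to the next 64-byte (cacheline) boundary. */
static int pad_dwords(uint32_t tail)
{
	return (64 - (tail & 63)) / sizeof(uint32_t);
}

int main(void)
{
	uint32_t tails[] = { 4, 36, 60 };

	for (unsigned int i = 0; i < sizeof(tails) / sizeof(tails[0]); i++) {
		int pad = pad_dwords(tails[i]);
		printf("tail %2u -> %2d noop dwords -> new tail %u\n",
		       tails[i], pad, tails[i] + 4 * (uint32_t)pad);
	}
	return 0;
}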


@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data)
{


@ -141,6 +141,7 @@ nouveau-y += core/subdev/mc/base.o
nouveau-y += core/subdev/mc/nv04.o
nouveau-y += core/subdev/mc/nv40.o
nouveau-y += core/subdev/mc/nv44.o
nouveau-y += core/subdev/mc/nv4c.o
nouveau-y += core/subdev/mc/nv50.o
nouveau-y += core/subdev/mc/nv94.o
nouveau-y += core/subdev/mc/nv98.o


@ -311,7 +311,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@ -334,7 +334,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
@ -357,7 +357,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@ -380,7 +380,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@ -403,7 +403,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;


@ -1142,7 +1142,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
if (conf != ~0) {
if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
u32 soff = (ffs(outp.or) - 1) * 0x08;
u32 ctrl = nv_rd32(priv, 0x610798 + soff);
u32 ctrl = nv_rd32(priv, 0x610794 + soff);
u32 datarate;
switch ((ctrl & 0x000f0000) >> 16) {


@ -112,7 +112,7 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
nv_wr32(priv, 0x002270, cur->addr >> 12);
nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000))
nv_error(priv, "runlist %d update timeout\n", engine);
mutex_unlock(&nv_subdev(priv)->mutex);
}


@ -539,7 +539,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
ustatus &= ~0x04030000;
}
if (ustatus && display) {
nv_error("%s - TP%d:", name, i);
nv_error(priv, "%s - TP%d:", name, i);
nouveau_bitfield_print(nv50_mpc_traps, ustatus);
pr_cont("\n");
ustatus = 0;


@ -47,6 +47,7 @@ struct nouveau_mc_oclass {
extern struct nouveau_oclass *nv04_mc_oclass;
extern struct nouveau_oclass *nv40_mc_oclass;
extern struct nouveau_oclass *nv44_mc_oclass;
extern struct nouveau_oclass *nv4c_mc_oclass;
extern struct nouveau_oclass *nv50_mc_oclass;
extern struct nouveau_oclass *nv94_mc_oclass;
extern struct nouveau_oclass *nv98_mc_oclass;


@ -130,6 +130,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
u16 pcir;
int i;
/* there is no prom on nv4x IGP's */
if (device->card_type == NV_40 && device->chipset >= 0x4c)
return;
/* enable access to rom */
if (device->card_type >= NV_50)
pcireg = 0x088050;


@ -36,7 +36,7 @@ nv1a_fb_oclass = &(struct nv04_fb_impl) {
.fini = _nouveau_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
.base.ram = &nv10_ram_oclass,
.base.ram = &nv1a_ram_oclass,
.tile.regions = 8,
.tile.init = nv10_fb_tile_init,
.tile.fini = nv10_fb_tile_fini,


@ -14,6 +14,7 @@ int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
extern const struct nouveau_mc_intr nv04_mc_intr[];
int nv04_mc_init(struct nouveau_object *);
void nv40_mc_msi_rearm(struct nouveau_mc *);
int nv44_mc_init(struct nouveau_object *object);
int nv50_mc_init(struct nouveau_object *);
extern const struct nouveau_mc_intr nv50_mc_intr[];
extern const struct nouveau_mc_intr nvc0_mc_intr[];


@ -24,7 +24,7 @@
#include "nv04.h"
static int
int
nv44_mc_init(struct nouveau_object *object)
{
struct nv04_mc_priv *priv = (void *)object;


@ -0,0 +1,45 @@
/*
* Copyright 2014 Ilia Mirkin
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ilia Mirkin
*/
#include "nv04.h"
static void
nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
{
struct nv04_mc_priv *priv = (void *)pmc;
nv_wr08(priv, 0x088050, 0xff);
}
struct nouveau_oclass *
nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x4c),
.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_mc_ctor,
.dtor = _nouveau_mc_dtor,
.init = nv44_mc_init,
.fini = _nouveau_mc_fini,
},
.intr = nv04_mc_intr,
.msi_rearm = nv4c_mc_msi_rearm,
}.base;


@ -106,6 +106,29 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
return 0;
}
/*
* On some platforms, _DSM(nouveau_op_dsm_muid, func0) has special
* requirements on the fourth parameter, so a private implementation
* instead of using acpi_check_dsm().
*/
static int nouveau_check_optimus_dsm(acpi_handle handle)
{
int result;
/*
* Function 0 returns a Buffer containing available functions.
* The args parameter is ignored for function 0, so just put 0 in it
*/
if (nouveau_optimus_dsm(handle, 0, 0, &result))
return 0;
/*
* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported.
* If the n-th bit is enabled, function n is supported
*/
return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS);
}
static int nouveau_dsm(acpi_handle handle, int func, int arg)
{
int ret = 0;
@ -207,8 +230,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
1 << NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
if (acpi_check_dsm(dhandle, nouveau_op_dsm_muid, 0x00000100,
1 << NOUVEAU_DSM_OPTIMUS_CAPS))
if (nouveau_check_optimus_dsm(dhandle))
retval |= NOUVEAU_DSM_HAS_OPT;
if (retval & NOUVEAU_DSM_HAS_OPT) {
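
The new nouveau_check_optimus_dsm() open-codes the _DSM function-0 capability test because, per the comment, the Optimus _DSM is picky about the fourth argument: function 0 returns a bitmask where bit 0 says anything at all is supported and bit n says function n is. A small sketch of that bitmask check; the function number and mask value are fabricated for the demo, not taken from the nouveau driver:

#include <stdio.h>

#define OPTIMUS_CAPS_FUNC 0x1a   /* hypothetical function number for the demo */

/* Function 0 of a _DSM returns a bitmask: bit 0 = "something is supported",
 * bit n = "function n is supported". */
static int dsm_supports(unsigned int mask, unsigned int func)
{
	return (mask & 1) && (mask & (1u << func));
}

int main(void)
{
	unsigned int mask = (1u << 0) | (1u << OPTIMUS_CAPS_FUNC);   /* fabricated */

	printf("func 0x%02x supported: %d\n", OPTIMUS_CAPS_FUNC,
	       dsm_supports(mask, OPTIMUS_CAPS_FUNC));
	printf("func 0x05 supported: %d\n", dsm_supports(mask, 0x05));
	return 0;
}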


@ -1249,7 +1249,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
if (!node->memtype)
if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
/* untiled */
break;
/* fallthrough, tiled memory */


@ -376,6 +376,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto fail_device;
dev->irq_enabled = true;
/* workaround an odd issue on nvc1 by disabling the device's
* nosnoop capability. hopefully won't cause issues until a
* better fix is found - assuming there is one...
@ -475,6 +477,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_object *device;
dev->irq_enabled = false;
device = drm->client.base.device;
drm_put_dev(dev);


@ -14,7 +14,9 @@ nouveau_vga_set_decode(void *priv, bool state)
{
struct nouveau_device *device = nouveau_dev(priv);
if (device->chipset >= 0x40)
if (device->card_type == NV_40 && device->chipset >= 0x4c)
nv_wr32(device, 0x088060, state);
else if (device->chipset >= 0x40)
nv_wr32(device, 0x088054, state);
else
nv_wr32(device, 0x001854, state);


@ -559,7 +559,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
u32 adjusted_clock = mode->clock;
int encoder_mode = atombios_get_encoder_mode(encoder);
u32 dp_clock = mode->clock;
int bpc = radeon_get_monitor_bpc(connector);
int bpc = radeon_crtc->bpc;
bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
/* reset the pll flags */
@ -1176,7 +1176,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
/* Set NUM_BANKS. */
if (rdev->family >= CHIP_BONAIRE) {
if (rdev->family >= CHIP_TAHITI) {
unsigned tileb, index, num_banks, tile_split_bytes;
/* Calculate the macrotile mode index. */
@ -1194,13 +1194,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
if (rdev->family >= CHIP_BONAIRE)
num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
else
num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
} else {
/* SI and older. */
if (rdev->family >= CHIP_TAHITI)
tmp = rdev->config.si.tile_config;
else if (rdev->family >= CHIP_CAYMAN)
/* NI and older. */
if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;


@ -464,11 +464,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
{
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int bpc = 8;
if (connector)
bpc = radeon_get_monitor_bpc(connector);
if (encoder->crtc) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
bpc = radeon_crtc->bpc;
}
switch (bpc) {
case 0:


@ -2588,7 +2588,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
enable_sq_ramping = false;
if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {


@ -135,6 +135,9 @@ extern int radeon_hard_reset;
/* R600+ */
#define R600_RING_TYPE_UVD_INDEX 5
/* number of hw syncs before falling back on blocking */
#define RADEON_NUM_SYNCS 4
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
@ -554,7 +557,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
/*
* Semaphores.
*/
/* everything here is constant */
struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;


@ -571,6 +571,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
radeon_crtc->max_cursor_width = CURSOR_WIDTH;
radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
}
dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
#if 0
radeon_crtc->mode_set.crtc = &radeon_crtc->base;


@ -139,7 +139,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
}
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;

Some files were not shown because too many files have changed in this diff.