Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Lots of easy overlapping changes in the conflict resolutions here.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5b79c2af66
@@ -478,6 +478,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/meltdown
/sys/devices/system/cpu/vulnerabilities/spectre_v1
/sys/devices/system/cpu/vulnerabilities/spectre_v2
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
@@ -2680,6 +2680,9 @@
allow data leaks with this option, which is equivalent
to spectre_v2=off.

nospec_store_bypass_disable
[HW] Disable all mitigations for the Speculative Store Bypass vulnerability

noxsave [BUGS=X86] Disables x86 extended register state save
and restore using xsave. The kernel will fallback to
enabling legacy floating-point and sse state.
@@ -4025,6 +4028,48 @@
Not specifying this option is equivalent to
spectre_v2=auto.

spec_store_bypass_disable=
[HW] Control Speculative Store Bypass (SSB) Disable mitigation
(Speculative Store Bypass vulnerability)

Certain CPUs are vulnerable to an exploit against a
common industry wide performance optimization known
as "Speculative Store Bypass" in which recent stores
to the same memory location may not be observed by
later loads during speculative execution. The idea
is that such stores are unlikely and that they can
be detected prior to instruction retirement at the
end of a particular speculation execution window.

In vulnerable processors, the speculatively forwarded
store can be used in a cache side channel attack, for
example to read memory to which the attacker does not
directly have access (e.g. inside sandboxed code).

This parameter controls whether the Speculative Store
Bypass optimization is used.

on      - Unconditionally disable Speculative Store Bypass
off     - Unconditionally enable Speculative Store Bypass
auto    - Kernel detects whether the CPU model contains an
          implementation of Speculative Store Bypass and
          picks the most appropriate mitigation. If the
          CPU is not vulnerable, "off" is selected. If the
          CPU is vulnerable the default mitigation is
          architecture and Kconfig dependent. See below.
prctl   - Control Speculative Store Bypass per thread
          via prctl. Speculative Store Bypass is enabled
          for a process by default. The state of the control
          is inherited on fork.
seccomp - Same as "prctl" above, but all seccomp threads
          will disable SSB unless they explicitly opt out.

Not specifying this option is equivalent to
spec_store_bypass_disable=auto.

Default mitigations:
X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"

spia_io_base= [HW,MTD]
spia_fio_base=
spia_pedr=
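A brief note beyond the diff itself: based on the documentation added above, the mitigation mode is chosen on the kernel command line at boot, for example

    spec_store_bypass_disable=prctl

which keeps Speculative Store Bypass enabled by default and lets individual tasks request the mitigation through prctl(2).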
@@ -300,12 +300,6 @@ unattached instance are:
The ioctl calls available on an instance of /dev/ppp attached to a
channel are:

* PPPIOCDETACH detaches the instance from the channel. This ioctl is
deprecated since the same effect can be achieved by closing the
instance. In order to prevent possible races this ioctl will fail
with an EINVAL error if more than one file descriptor refers to this
instance (i.e. as a result of dup(), dup2() or fork()).

* PPPIOCCONNECT connects this channel to a PPP interface. The
argument should point to an int containing the interface unit
number. It will return an EINVAL error if the channel is already
@@ -19,6 +19,7 @@ place where this information is gathered.
no_new_privs
seccomp_filter
unshare
spec_ctrl

.. only:: subproject and html
@@ -0,0 +1,94 @@
===================
Speculation Control
===================

Quite some CPUs have speculation-related misfeatures which are in
fact vulnerabilities causing data leaks in various forms even across
privilege domains.

The kernel provides mitigation for such vulnerabilities in various
forms. Some of these mitigations are compile-time configurable and some
can be supplied on the kernel command line.

There is also a class of mitigations which are very expensive, but they can
be restricted to a certain set of processes or tasks in controlled
environments. The mechanism to control these mitigations is via
:manpage:`prctl(2)`.

There are two prctl options which are related to this:

* PR_GET_SPECULATION_CTRL

* PR_SET_SPECULATION_CTRL

PR_GET_SPECULATION_CTRL
-----------------------

PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
the following meaning:

==== ===================== ===================================================
Bit  Define                Description
==== ===================== ===================================================
0    PR_SPEC_PRCTL         Mitigation can be controlled per task by
                           PR_SET_SPECULATION_CTRL.
1    PR_SPEC_ENABLE        The speculation feature is enabled, mitigation is
                           disabled.
2    PR_SPEC_DISABLE       The speculation feature is disabled, mitigation is
                           enabled.
3    PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
                           subsequent prctl(..., PR_SPEC_ENABLE) will fail.
==== ===================== ===================================================

If all bits are 0 the CPU is not affected by the speculation misfeature.

If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
misfeature will fail.

PR_SET_SPECULATION_CTRL
-----------------------

PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
PR_SPEC_FORCE_DISABLE.

Common error codes
------------------
======= =================================================================
Value   Meaning
======= =================================================================
EINVAL  The prctl is not implemented by the architecture or unused
        prctl(2) arguments are not 0.

ENODEV  arg2 is selecting a not supported speculation misfeature.
======= =================================================================

PR_SET_SPECULATION_CTRL error codes
-----------------------------------
======= =================================================================
Value   Meaning
======= =================================================================
0       Success

ERANGE  arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
        PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.

ENXIO   Control of the selected speculation misfeature is not possible.
        See PR_GET_SPECULATION_CTRL.

EPERM   Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
        tried to enable it again.
======= =================================================================

Speculation misfeature controls
-------------------------------
- PR_SPEC_STORE_BYPASS: Speculative Store Bypass

Invocations:
* prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
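A minimal user-space sketch (not part of the diff) of the interface documented above; it assumes a libc/kernel combination where the PR_* speculation constants are exposed via <linux/prctl.h>:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Query the Speculative Store Bypass control state for this task. */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		fprintf(stderr, "PR_GET_SPECULATION_CTRL: %s\n", strerror(errno));
		return 1;
	}
	if (!(state & PR_SPEC_PRCTL)) {
		printf("per-task SSB control not available (state 0x%x)\n", state);
		return 0;
	}

	/* Disable the speculation misfeature (i.e. enable the mitigation);
	 * the setting is inherited across fork(). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0) < 0) {
		fprintf(stderr, "PR_SET_SPECULATION_CTRL: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}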
MAINTAINERS
@@ -2332,7 +2332,7 @@ F: drivers/gpio/gpio-ath79.c
F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt

ATHEROS ATH GENERIC UTILITIES
M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
M: Kalle Valo <kvalo@codeaurora.org>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/ath/*

@@ -2347,7 +2347,7 @@ S: Maintained
F: drivers/net/wireless/ath/ath5k/

ATHEROS ATH6KL WIRELESS DRIVER
M: Kalle Valo <kvalo@qca.qualcomm.com>
M: Kalle Valo <kvalo@codeaurora.org>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/ath6kl
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

@@ -5386,7 +5386,6 @@ S: Maintained
F: drivers/iommu/exynos-iommu.c

EZchip NPS platform support
M: Elad Kanfi <eladkan@mellanox.com>
M: Vineet Gupta <vgupta@synopsys.com>
S: Supported
F: arch/arc/plat-eznps

@@ -6502,9 +6501,15 @@ F: Documentation/networking/hinic.txt
F: drivers/net/ethernet/huawei/hinic/

HUGETLB FILESYSTEM
M: Nadia Yvette Chambers <nyc@holomorphy.com>
M: Mike Kravetz <mike.kravetz@oracle.com>
L: linux-mm@kvack.org
S: Maintained
F: fs/hugetlbfs/
F: mm/hugetlb.c
F: include/linux/hugetlb.h
F: Documentation/admin-guide/mm/hugetlbpage.rst
F: Documentation/vm/hugetlbfs_reserv.rst
F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages

HVA ST MEDIA DRIVER
M: Jean-Christophe Trotin <jean-christophe.trotin@st.com>

@@ -9020,7 +9025,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx5/core/en_*

MELLANOX ETHERNET INNOVA DRIVERS
M: Boris Pismenny <borisp@mellanox.com>
R: Boris Pismenny <borisp@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com

@@ -9030,6 +9035,15 @@ F: drivers/net/ethernet/mellanox/mlx5/core/accel/*
F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
F: include/linux/mlx5/mlx5_ifc_fpga.h

MELLANOX ETHERNET INNOVA IPSEC DRIVER
R: Boris Pismenny <borisp@mellanox.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/*
F: drivers/net/ethernet/mellanox/mlx5/core/ipsec*

MELLANOX ETHERNET SWITCH DRIVERS
M: Jiri Pirko <jiri@mellanox.com>
M: Ido Schimmel <idosch@mellanox.com>

@@ -9077,7 +9091,6 @@ F: include/uapi/rdma/mlx4-abi.h

MELLANOX MLX5 core VPI driver
M: Saeed Mahameed <saeedm@mellanox.com>
M: Matan Barak <matanb@mellanox.com>
M: Leon Romanovsky <leonro@mellanox.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org

@@ -9088,7 +9101,6 @@ F: drivers/net/ethernet/mellanox/mlx5/core/
F: include/linux/mlx5/

MELLANOX MLX5 IB driver
M: Matan Barak <matanb@mellanox.com>
M: Leon Romanovsky <leonro@mellanox.com>
L: linux-rdma@vger.kernel.org
W: http://www.mellanox.com

@@ -11628,7 +11640,7 @@ S: Maintained
F: drivers/media/tuners/qt1010*

QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
M: Kalle Valo <kvalo@qca.qualcomm.com>
M: Kalle Valo <kvalo@codeaurora.org>
L: ath10k@lists.infradead.org
W: http://wireless.kernel.org/en/users/Drivers/ath10k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

@@ -11679,7 +11691,7 @@ S: Maintained
F: drivers/media/platform/qcom/venus/

QUALCOMM WCN36XX WIRELESS DRIVER
M: Eugene Krasnikov <k.eugene.e@gmail.com>
M: Kalle Valo <kvalo@codeaurora.org>
L: wcn36xx@lists.infradead.org
W: http://wireless.kernel.org/en/users/Drivers/wcn36xx
T: git git://github.com/KrasnikovEugene/wcn36xx.git
@@ -211,6 +211,7 @@ config ALPHA_EIGER
config ALPHA_JENSEN
bool "Jensen"
depends on BROKEN
select DMA_DIRECT_OPS
help
DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
of the first-generation Alpha systems. A number of these systems

@@ -2,11 +2,15 @@
#ifndef _ALPHA_DMA_MAPPING_H
#define _ALPHA_DMA_MAPPING_H

extern const struct dma_map_ops *dma_ops;
extern const struct dma_map_ops alpha_pci_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return dma_ops;
#ifdef CONFIG_ALPHA_JENSEN
	return &dma_direct_ops;
#else
	return &alpha_pci_ops;
#endif
}

#endif /* _ALPHA_DMA_MAPPING_H */
@@ -37,20 +37,20 @@ unsigned int ioread32(void __iomem *addr)

void iowrite8(u8 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
	mb();
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
}

void iowrite16(u16 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
	mb();
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
}

void iowrite32(u32 b, void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
	mb();
	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
}

EXPORT_SYMBOL(ioread8);

@@ -176,26 +176,26 @@ u64 readq(const volatile void __iomem *addr)

void writeb(u8 b, volatile void __iomem *addr)
{
	__raw_writeb(b, addr);
	mb();
	__raw_writeb(b, addr);
}

void writew(u16 b, volatile void __iomem *addr)
{
	__raw_writew(b, addr);
	mb();
	__raw_writew(b, addr);
}

void writel(u32 b, volatile void __iomem *addr)
{
	__raw_writel(b, addr);
	mb();
	__raw_writel(b, addr);
}

void writeq(u64 b, volatile void __iomem *addr)
{
	__raw_writeq(b, addr);
	mb();
	__raw_writeq(b, addr);
}

EXPORT_SYMBOL(readb);
@@ -102,36 +102,3 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
	else
		return -ENODEV;
}

static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp,
				       unsigned long attrs)
{
	void *ret;

	if (!dev || *dev->dma_mask >= 0xffffffffUL)
		gfp &= ~GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

static int alpha_noop_supported(struct device *dev, u64 mask)
{
	return mask < 0x00ffffffUL ? 0 : 1;
}

const struct dma_map_ops alpha_noop_ops = {
	.alloc = alpha_noop_alloc_coherent,
	.free = dma_noop_free_coherent,
	.map_page = dma_noop_map_page,
	.map_sg = dma_noop_map_sg,
	.mapping_error = dma_noop_mapping_error,
	.dma_supported = alpha_noop_supported,
};

const struct dma_map_ops *dma_ops = &alpha_noop_ops;
EXPORT_SYMBOL(dma_ops);
@@ -950,6 +950,4 @@ const struct dma_map_ops alpha_pci_ops = {
	.mapping_error = alpha_pci_mapping_error,
	.dma_supported = alpha_pci_supported,
};

const struct dma_map_ops *dma_ops = &alpha_pci_ops;
EXPORT_SYMBOL(dma_ops);
EXPORT_SYMBOL(alpha_pci_ops);
@@ -466,12 +466,6 @@ void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
void __init dma_contiguous_remap(void)
{
	int i;

	if (!dma_mmu_remap_num)
		return;

	/* call flush_cache_all() since CMA area would be large enough */
	flush_cache_all();
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;

@@ -504,15 +498,7 @@ void __init dma_contiguous_remap(void)
		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		/*
		 * All the memory in CMA region will be on ZONE_MOVABLE.
		 * If that zone is considered as highmem, the memory in CMA
		 * region is also considered as highmem even if it's
		 * physical address belong to lowmem. In this case,
		 * re-mapping isn't required.
		 */
		if (!is_highmem_idx(ZONE_MOVABLE))
			iotable_init(&map, 1);
		iotable_init(&map, 1);
	}
}
@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
	/* LSE atomics */
	" mvn %w[i], %w[i]\n"
	" stclr %w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
	/* LSE atomics */ \
	" mvn %w[i], %w[i]\n" \
	" ldclr" #mb " %w[i], %w[i], %[v]") \
	: [i] "+r" (w0), [v] "+Q" (v->counter) \
	: [i] "+&r" (w0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
	\

@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
	/* LSE atomics */
	" neg %w[i], %w[i]\n"
	" stadd %w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
	" neg %w[i], %w[i]\n" \
	" ldadd" #mb " %w[i], w30, %[v]\n" \
	" add %w[i], %w[i], w30") \
	: [i] "+r" (w0), [v] "+Q" (v->counter) \
	: [i] "+&r" (w0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS , ##cl); \
	\

@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
	/* LSE atomics */ \
	" neg %w[i], %w[i]\n" \
	" ldadd" #mb " %w[i], %w[i], %[v]") \
	: [i] "+r" (w0), [v] "+Q" (v->counter) \
	: [i] "+&r" (w0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
	\

@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
	/* LSE atomics */
	" mvn %[i], %[i]\n"
	" stclr %[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
	/* LSE atomics */ \
	" mvn %[i], %[i]\n" \
	" ldclr" #mb " %[i], %[i], %[v]") \
	: [i] "+r" (x0), [v] "+Q" (v->counter) \
	: [i] "+&r" (x0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
	\

@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
	/* LSE atomics */
	" neg %[i], %[i]\n"
	" stadd %[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
	" neg %[i], %[i]\n" \
	" ldadd" #mb " %[i], x30, %[v]\n" \
	" add %[i], %[i], x30") \
	: [i] "+r" (x0), [v] "+Q" (v->counter) \
	: [i] "+&r" (x0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
	\

@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
	/* LSE atomics */ \
	" neg %[i], %[i]\n" \
	" ldadd" #mb " %[i], %[i], %[v]") \
	: [i] "+r" (x0), [v] "+Q" (v->counter) \
	: [i] "+&r" (x0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
	\

@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
	" sub x30, x30, %[ret]\n"
	" cbnz x30, 1b\n"
	"2:")
	: [ret] "+r" (x0), [v] "+Q" (v->counter)
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: __LL_SC_CLOBBERS, "cc", "memory");

@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
	" eor %[old1], %[old1], %[oldval1]\n" \
	" eor %[old2], %[old2], %[oldval2]\n" \
	" orr %[old1], %[old1], %[old2]") \
	: [old1] "+r" (x0), [old2] "+r" (x1), \
	: [old1] "+&r" (x0), [old2] "+&r" (x1), \
	  [v] "+Q" (*(unsigned long *)ptr) \
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
@@ -75,3 +75,11 @@ NOKPROBE_SYMBOL(_mcount);
/* arm-smccc */
EXPORT_SYMBOL(__arm_smccc_smc);
EXPORT_SYMBOL(__arm_smccc_hvc);

/* tishift.S */
extern long long __ashlti3(long long a, int b);
EXPORT_SYMBOL(__ashlti3);
extern long long __ashrti3(long long a, int b);
EXPORT_SYMBOL(__ashrti3);
extern long long __lshrti3(long long a, int b);
EXPORT_SYMBOL(__lshrti3);
@@ -1,17 +1,6 @@
/*
 * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 * Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include <linux/linkage.h>
@@ -293,6 +293,57 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
static void __do_user_fault(struct siginfo *info, unsigned int esr)
{
	current->thread.fault_address = (unsigned long)info->si_addr;

	/*
	 * If the faulting address is in the kernel, we must sanitize the ESR.
	 * From userspace's point of view, kernel-only mappings don't exist
	 * at all, so we report them as level 0 translation faults.
	 * (This is not quite the way that "no mapping there at all" behaves:
	 * an alignment fault not caused by the memory type would take
	 * precedence over translation fault for a real access to empty
	 * space. Unfortunately we can't easily distinguish "alignment fault
	 * not caused by memory type" from "alignment fault caused by memory
	 * type", so we ignore this wrinkle and just return the translation
	 * fault.)
	 */
	if (current->thread.fault_address >= TASK_SIZE) {
		switch (ESR_ELx_EC(esr)) {
		case ESR_ELx_EC_DABT_LOW:
			/*
			 * These bits provide only information about the
			 * faulting instruction, which userspace knows already.
			 * We explicitly clear bits which are architecturally
			 * RES0 in case they are given meanings in future.
			 * We always report the ESR as if the fault was taken
			 * to EL1 and so ISV and the bits in ISS[23:14] are
			 * clear. (In fact it always will be a fault to EL1.)
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
			       ESR_ELx_CM | ESR_ELx_WNR;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		case ESR_ELx_EC_IABT_LOW:
			/*
			 * Claim a level 0 translation fault.
			 * All other bits are architecturally RES0 for faults
			 * reported with that DFSC value, so we clear them.
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		default:
			/*
			 * This should never happen (entry.S only brings us
			 * into this code for insn and data aborts from a lower
			 * exception level). Fail safe by not providing an ESR
			 * context record at all.
			 */
			WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
			esr = 0;
			break;
		}
	}

	current->thread.fault_code = esr;
	arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current);
}
@@ -933,13 +933,15 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
				      pgprot_val(mk_sect_prot(prot)));
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);

	/* ioremap_page_range doesn't honour BBM */
	if (pud_present(READ_ONCE(*pudp)))
	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
	set_pud(pudp, new_pud);
	return 1;
}

@@ -947,13 +949,15 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
				      pgprot_val(mk_sect_prot(prot)));
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);

	/* ioremap_page_range doesn't honour BBM */
	if (pmd_present(READ_ONCE(*pmdp)))
	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
	set_pmd(pmdp, new_pmd);
	return 1;
}
@@ -74,6 +74,27 @@
 */
#define EX_R3 EX_DAR

#define STF_ENTRY_BARRIER_SLOT \
	STF_ENTRY_BARRIER_FIXUP_SECTION; \
	nop; \
	nop; \
	nop

#define STF_EXIT_BARRIER_SLOT \
	STF_EXIT_BARRIER_FIXUP_SECTION; \
	nop; \
	nop; \
	nop; \
	nop; \
	nop; \
	nop

/*
 * r10 must be free to use, r13 must be paca
 */
#define INTERRUPT_TO_KERNEL \
	STF_ENTRY_BARRIER_SLOT

/*
 * Macros for annotating the expected destination of (h)rfid
 *

@@ -90,16 +111,19 @@
	rfid

#define RFI_TO_USER \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	rfid; \
	b rfi_flush_fallback

#define RFI_TO_USER_OR_KERNEL \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	rfid; \
	b rfi_flush_fallback

#define RFI_TO_GUEST \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	rfid; \
	b rfi_flush_fallback

@@ -108,21 +132,25 @@
	hrfid

#define HRFI_TO_USER \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	hrfid; \
	b hrfi_flush_fallback

#define HRFI_TO_USER_OR_KERNEL \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	hrfid; \
	b hrfi_flush_fallback

#define HRFI_TO_GUEST \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	hrfid; \
	b hrfi_flush_fallback

#define HRFI_TO_UNKNOWN \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	hrfid; \
	b hrfi_flush_fallback

@@ -254,6 +282,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define __EXCEPTION_PROLOG_1_PRE(area) \
	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
	INTERRUPT_TO_KERNEL; \
	SAVE_CTR(r10, area); \
	mfcr r9;
@@ -187,6 +187,22 @@ label##3: \
	FTR_ENTRY_OFFSET label##1b-label##3b; \
	.popsection;

#define STF_ENTRY_BARRIER_FIXUP_SECTION \
953: \
	.pushsection __stf_entry_barrier_fixup,"a"; \
	.align 2; \
954: \
	FTR_ENTRY_OFFSET 953b-954b; \
	.popsection;

#define STF_EXIT_BARRIER_FIXUP_SECTION \
955: \
	.pushsection __stf_exit_barrier_fixup,"a"; \
	.align 2; \
956: \
	FTR_ENTRY_OFFSET 955b-956b; \
	.popsection;

#define RFI_FLUSH_FIXUP_SECTION \
951: \
	.pushsection __rfi_flush_fixup,"a"; \

@@ -199,6 +215,9 @@ label##3: \
#ifndef __ASSEMBLY__
#include <linux/types.h>

extern long stf_barrier_fallback;
extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;

void apply_feature_fixups(void);
@@ -12,6 +12,17 @@
extern unsigned long powerpc_security_features;
extern bool rfi_flush;

/* These are bit flags */
enum stf_barrier_type {
	STF_BARRIER_NONE = 0x1,
	STF_BARRIER_FALLBACK = 0x2,
	STF_BARRIER_EIEIO = 0x4,
	STF_BARRIER_SYNC_ORI = 0x8,
};

void setup_stf_barrier(void);
void do_stf_barrier_fixups(enum stf_barrier_type types);

static inline void security_ftr_set(unsigned long feature)
{
	powerpc_security_features |= feature;
@@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7)
	beqlr
	li r0,0
	mtspr SPRN_LPID,r0
	mtspr SPRN_PCR,r0
	mfspr r3,SPRN_LPCR
	li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
	bl __init_LPCR_ISA206

@@ -41,6 +42,7 @@ _GLOBAL(__restore_cpu_power7)
	beqlr
	li r0,0
	mtspr SPRN_LPID,r0
	mtspr SPRN_PCR,r0
	mfspr r3,SPRN_LPCR
	li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
	bl __init_LPCR_ISA206

@@ -57,6 +59,7 @@ _GLOBAL(__setup_cpu_power8)
	beqlr
	li r0,0
	mtspr SPRN_LPID,r0
	mtspr SPRN_PCR,r0
	mfspr r3,SPRN_LPCR
	ori r3, r3, LPCR_PECEDH
	li r4,0 /* LPES = 0 */

@@ -78,6 +81,7 @@ _GLOBAL(__restore_cpu_power8)
	beqlr
	li r0,0
	mtspr SPRN_LPID,r0
	mtspr SPRN_PCR,r0
	mfspr r3,SPRN_LPCR
	ori r3, r3, LPCR_PECEDH
	li r4,0 /* LPES = 0 */

@@ -99,6 +103,7 @@ _GLOBAL(__setup_cpu_power9)
	mtspr SPRN_PSSCR,r0
	mtspr SPRN_LPID,r0
	mtspr SPRN_PID,r0
	mtspr SPRN_PCR,r0
	mfspr r3,SPRN_LPCR
	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
	or r3, r3, r4

@@ -123,6 +128,7 @@ _GLOBAL(__restore_cpu_power9)
	mtspr SPRN_PSSCR,r0
	mtspr SPRN_LPID,r0
	mtspr SPRN_PID,r0
	mtspr SPRN_PCR,r0
	mfspr r3,SPRN_LPCR
	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
	or r3, r3, r4
@@ -101,6 +101,7 @@ static void __restore_cpu_cpufeatures(void)
	if (hv_mode) {
		mtspr(SPRN_LPID, 0);
		mtspr(SPRN_HFSCR, system_registers.hfscr);
		mtspr(SPRN_PCR, 0);
	}
	mtspr(SPRN_FSCR, system_registers.fscr);
@@ -885,7 +885,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif


EXC_REAL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0x900)
EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)

@@ -961,6 +961,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
	mtctr r13; \
	GET_PACA(r13); \
	std r10,PACA_EXGEN+EX_R10(r13); \
	INTERRUPT_TO_KERNEL; \
	KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
	HMT_MEDIUM; \
	mfctr r9;

@@ -969,7 +970,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
#define SYSCALL_KVMTEST \
	HMT_MEDIUM; \
	mr r9,r13; \
	GET_PACA(r13);
	GET_PACA(r13); \
	INTERRUPT_TO_KERNEL;
#endif

#define LOAD_SYSCALL_HANDLER(reg) \

@@ -1507,6 +1509,19 @@ masked_##_H##interrupt: \
	b .; \
	MASKED_DEC_HANDLER(_H)

TRAMP_REAL_BEGIN(stf_barrier_fallback)
	std r9,PACA_EXRFI+EX_R9(r13)
	std r10,PACA_EXRFI+EX_R10(r13)
	sync
	ld r9,PACA_EXRFI+EX_R9(r13)
	ld r10,PACA_EXRFI+EX_R10(r13)
	ori 31,31,0
	.rept 14
	b 1f
1:
	.endr
	blr

TRAMP_REAL_BEGIN(rfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/seq_buf.h>

#include <asm/debugfs.h>
#include <asm/security_features.h>

@@ -86,3 +87,151 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c

	return s.len;
}

/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
	pr_info("stf-barrier: disabled on command line.");
	no_stf_barrier = true;
	return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
		/* Until firmware tells us, we have the barrier with auto */
		return 0;
	} else if (strncmp(p, "off", 3) == 0) {
		handle_no_stf_barrier(NULL);
		return 0;
	} else
		return 1;

	return 0;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

static void stf_barrier_enable(bool enable)
{
	if (enable)
		do_stf_barrier_fixups(stf_enabled_flush_types);
	else
		do_stf_barrier_fixups(STF_BARRIER_NONE);

	stf_barrier = enable;
}

void setup_stf_barrier(void)
{
	enum stf_barrier_type type;
	bool enable, hv;

	hv = cpu_has_feature(CPU_FTR_HVMODE);

	/* Default to fallback in case fw-features are not available */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		type = STF_BARRIER_EIEIO;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		type = STF_BARRIER_SYNC_ORI;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		type = STF_BARRIER_FALLBACK;
	else
		type = STF_BARRIER_NONE;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
		  (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));

	if (type == STF_BARRIER_FALLBACK) {
		pr_info("stf-barrier: fallback barrier available\n");
	} else if (type == STF_BARRIER_SYNC_ORI) {
		pr_info("stf-barrier: hwsync barrier available\n");
	} else if (type == STF_BARRIER_EIEIO) {
		pr_info("stf-barrier: eieio barrier available\n");
	}

	stf_enabled_flush_types = type;

	if (!no_stf_barrier)
		stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
		const char *type;
		switch (stf_enabled_flush_types) {
		case STF_BARRIER_EIEIO:
			type = "eieio";
			break;
		case STF_BARRIER_SYNC_ORI:
			type = "hwsync";
			break;
		case STF_BARRIER_FALLBACK:
			type = "fallback";
			break;
		default:
			type = "unknown";
		}
		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != stf_barrier)
		stf_barrier_enable(enable);

	return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
	*val = stf_barrier ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
@@ -133,6 +133,20 @@ SECTIONS
	RO_DATA(PAGE_SIZE)

#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_entry_barrier_fixup = .;
		*(__stf_entry_barrier_fixup)
		__stop___stf_entry_barrier_fixup = .;
	}

	. = ALIGN(8);
	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_exit_barrier_fixup = .;
		*(__stf_exit_barrier_fixup)
		__stop___stf_exit_barrier_fixup = .;
	}

	. = ALIGN(8);
	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
		__start___rfi_flush_fixup = .;
@@ -23,6 +23,7 @@
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/security_features.h>
#include <asm/firmware.h>

struct fixup_entry {

@@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}

#ifdef CONFIG_PPC_BOOK3S_64
void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[3], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_entry_barrier_fixup),
	end = PTRRELOC(&__stop___stf_entry_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK) {
		instrs[i++] = 0x7d4802a6; /* mflr r10 */
		instrs[i++] = 0x60000000; /* branch patched below */
		instrs[i++] = 0x7d4803a6; /* mtlr r10 */
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	} else if (types & STF_BARRIER_SYNC_ORI) {
		instrs[i++] = 0x7c0004ac; /* hwsync */
		instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);

		if (types & STF_BARRIER_FALLBACK)
			patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
				     BRANCH_SET_LINK);
		else
			patch_instruction(dest + 1, instrs[1]);

		patch_instruction(dest + 2, instrs[2]);
	}

	printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE) ? "no" :
		(types == STF_BARRIER_FALLBACK) ? "fallback" :
		(types == STF_BARRIER_EIEIO) ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
		: "unknown");
}

void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
	unsigned int instrs[6], *dest;
	long *start, *end;
	int i;

	start = PTRRELOC(&__start___stf_exit_barrier_fixup),
	end = PTRRELOC(&__stop___stf_exit_barrier_fixup);

	instrs[0] = 0x60000000; /* nop */
	instrs[1] = 0x60000000; /* nop */
	instrs[2] = 0x60000000; /* nop */
	instrs[3] = 0x60000000; /* nop */
	instrs[4] = 0x60000000; /* nop */
	instrs[5] = 0x60000000; /* nop */

	i = 0;
	if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
			instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
		} else {
			instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
			instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
		}
		instrs[i++] = 0x7c0004ac; /* hwsync */
		instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
		if (cpu_has_feature(CPU_FTR_HVMODE)) {
			instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
		} else {
			instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
		}
	} else if (types & STF_BARRIER_EIEIO) {
		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
	}

	for (i = 0; start < end; start++, i++) {
		dest = (void *)start + *start;

		pr_devel("patching dest %lx\n", (unsigned long)dest);

		patch_instruction(dest, instrs[0]);
		patch_instruction(dest + 1, instrs[1]);
		patch_instruction(dest + 2, instrs[2]);
		patch_instruction(dest + 3, instrs[3]);
		patch_instruction(dest + 4, instrs[4]);
		patch_instruction(dest + 5, instrs[5]);
	}
	printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
		(types == STF_BARRIER_NONE) ? "no" :
		(types == STF_BARRIER_FALLBACK) ? "fallback" :
		(types == STF_BARRIER_EIEIO) ? "eieio" :
		(types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
		: "unknown");
}


void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	do_stf_entry_barrier_fixups(types);
	do_stf_exit_barrier_fixups(types);
}

void do_rfi_flush_fixups(enum l1d_flush_type types)
{
	unsigned int instrs[3], *dest;
@@ -131,6 +131,7 @@ static void __init pnv_setup_arch(void)
	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);

	pnv_setup_rfi_flush();
	setup_stf_barrier();

	/* Initialize SMP */
	pnv_smp_init();

@@ -710,6 +710,7 @@ static void __init pSeries_setup_arch(void)
	fwnmi_init();

	pseries_setup_rfi_flush();
	setup_stf_barrier();

	/* By default, only probe PCI (can be overridden by rtas_pci) */
	pci_add_flags(PCI_PROBE_ONLY);
@@ -198,7 +198,6 @@
#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */

#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */

@@ -207,13 +206,19 @@
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */

#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */

#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */

@@ -274,9 +279,10 @@
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */

/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */

@@ -334,6 +340,7 @@
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */

/*
 * BUG word(s)

@@ -363,5 +370,6 @@
#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */

#endif /* _ASM_X86_CPUFEATURES_H */
@@ -924,7 +924,7 @@ struct kvm_x86_ops {
	int (*hardware_setup)(void); /* __init */
	void (*hardware_unsetup)(void); /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	bool (*cpu_has_high_real_mode_segbase)(void);
	bool (*has_emulated_msr)(int index);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	struct kvm *(*vm_alloc)(void);
@@ -42,6 +42,8 @@
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */

#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */

@@ -68,6 +70,11 @@
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
#define ARCH_CAP_SSB_NO (1 << 4) /*
				   * Not susceptible to Speculative Store Bypass
				   * attack, so no Speculative Store Bypass
				   * control required.
				   */

#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e

@@ -340,6 +347,8 @@
#define MSR_AMD64_SEV_ENABLED_BIT 0
#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)

#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f

/* Fam 17h MSRs */
#define MSR_F17H_IRPERF 0xc00000e9
@@ -217,6 +217,14 @@ enum spectre_v2_mitigation {
	SPECTRE_V2_IBRS,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

@@ -241,22 +249,27 @@ static inline void vmexit_fill_RSB(void)
#endif
}

#define alternative_msr_write(_msr, _val, _feature) \
	asm volatile(ALTERNATIVE("", \
				 "movl %[msr], %%ecx\n\t" \
				 "movl %[val], %%eax\n\t" \
				 "movl $0, %%edx\n\t" \
				 "wrmsr", \
				 _feature) \
		     : : [msr] "i" (_msr), [val] "i" (_val) \
		     : "eax", "ecx", "edx", "memory")
static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		     : : "c" (msr),
			 "a" ((u32)val),
			 "d" ((u32)(val >> 32)),
			 [feature] "i" (feature)
		     : "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
			      X86_FEATURE_USE_IBPB);
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.

@@ -265,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void)
 */
#define firmware_restrict_branch_speculation_start() \
do { \
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
	\
	preempt_disable(); \
	alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
			      X86_FEATURE_USE_IBRS_FW); \
} while (0)

#define firmware_restrict_branch_speculation_end() \
do { \
	alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \
	u64 val = x86_spec_ctrl_base; \
	\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
			      X86_FEATURE_USE_IBRS_FW); \
	preempt_enable(); \
} while (0)
@@ -0,0 +1,80 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SPECCTRL_H_
#define _ASM_X86_SPECCTRL_H_

#include <linux/thread_info.h>
#include <asm/nospec-branch.h>

/*
 * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
 * the guest has, while on VMEXIT we restore the host view. This
 * would be easier if SPEC_CTRL were architecturally maskable or
 * shadowable for guests but this is not (currently) the case.
 * Takes the guest view of SPEC_CTRL MSR as a parameter and also
 * the guest's version of VIRT_SPEC_CTRL, if emulated.
 */
extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);

/**
 * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
 * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
 * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
 * (may get translated to MSR_AMD64_LS_CFG bits)
 *
 * Avoids writing to the MSR if the content/bits are the same
 */
static inline
void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
{
	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
}

/**
 * x86_spec_ctrl_restore_host - Restore host speculation control registers
 * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
 * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
 * (may get translated to MSR_AMD64_LS_CFG bits)
 *
 * Avoids writing to the MSR if the content/bits are the same
 */
static inline
void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
{
	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
}

/* AMD specific Speculative Store Bypass MSR data */
extern u64 x86_amd_ls_cfg_base;
extern u64 x86_amd_ls_cfg_ssbd_mask;

static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
{
	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
{
	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
}

#ifdef CONFIG_SMP
extern void speculative_store_bypass_ht_init(void);
#else
static inline void speculative_store_bypass_ht_init(void) { }
#endif

extern void speculative_store_bypass_update(unsigned long tif);

static inline void speculative_store_bypass_update_current(void)
{
	speculative_store_bypass_update(current_thread_info()->flags);
}

#endif
@ -79,6 +79,7 @@ struct thread_info {
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
#define TIF_SSBD		5	/* Reduced data speculation */
#define TIF_SYSCALL_EMU		6	/* syscall emulation active */
#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
#define TIF_SECCOMP		8	/* secure computing */

@ -105,6 +106,7 @@ struct thread_info {
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SSBD		(1 << TIF_SSBD)
#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)

@ -144,7 +146,7 @@ struct thread_info {

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)

#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
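For reference, ssbd_tif_to_spec_ctrl() and ssbd_spec_ctrl_to_tif() from the header above are pure bit relocations between the TIF_SSBD thread flag (bit 5, per the defines just shown) and the SSBD bit of SPEC_CTRL (bit 2 of the MSR, as the Intel comment in __ssb_select_mitigation() later in this diff notes). A small standalone sketch of the arithmetic; the SPEC_CTRL_SSBD_SHIFT value is an assumption about msr-index.h, not quoted from this commit:

#include <assert.h>
#include <stdint.h>

#define TIF_SSBD		5		/* from the thread_info.h hunk above */
#define _TIF_SSBD		(1UL << TIF_SSBD)
#define SPEC_CTRL_SSBD_SHIFT	2		/* assumed: SSBD is bit 2 of SPEC_CTRL */
#define SPEC_CTRL_SSBD		(1ULL << SPEC_CTRL_SSBD_SHIFT)

int main(void)
{
	uint64_t tif = _TIF_SSBD;	/* the task has requested SSB disable */

	/* Same shift as ssbd_tif_to_spec_ctrl(): move bit 5 down to bit 2. */
	uint64_t msr_bits = (tif & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
	assert(msr_bits == SPEC_CTRL_SSBD);

	/* And back again, as ssbd_spec_ctrl_to_tif() does. */
	uint64_t tif_bits = (msr_bits & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
	assert(tif_bits == _TIF_SSBD);
	return 0;
}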
@ -10,6 +10,7 @@
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>

@ -554,6 +555,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)

@ -791,6 +812,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_ZEN);
	/*
	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
	 * all up to and including B1.
@ -12,8 +12,10 @@
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/nospec-branch.h>
#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>

@ -27,6 +29,27 @@
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
u64 __ro_after_init x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

void __init check_bugs(void)
{

@ -37,9 +60,27 @@ void __init check_bugs(void)
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
@ -93,7 +134,76 @@ static const char *spectre_v2_strings[] = {
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculative_store_bypass_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#ifdef RETPOLINE
static bool spectre_v2_bad_module;
@ -312,32 +422,289 @@ retpoline_auto:
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}
static enum ssb_mitigation __init __ssb_select_mitigation(void)
|
||||
{
|
||||
enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
|
||||
enum ssb_mitigation_cmd cmd;
|
||||
|
||||
if (!boot_cpu_has(X86_FEATURE_SSBD))
|
||||
return mode;
|
||||
|
||||
cmd = ssb_parse_cmdline();
|
||||
if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
|
||||
(cmd == SPEC_STORE_BYPASS_CMD_NONE ||
|
||||
cmd == SPEC_STORE_BYPASS_CMD_AUTO))
|
||||
return mode;
|
||||
|
||||
switch (cmd) {
|
||||
case SPEC_STORE_BYPASS_CMD_AUTO:
|
||||
case SPEC_STORE_BYPASS_CMD_SECCOMP:
|
||||
/*
|
||||
* Choose prctl+seccomp as the default mode if seccomp is
|
||||
* enabled.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_SECCOMP))
|
||||
mode = SPEC_STORE_BYPASS_SECCOMP;
|
||||
else
|
||||
mode = SPEC_STORE_BYPASS_PRCTL;
|
||||
break;
|
||||
case SPEC_STORE_BYPASS_CMD_ON:
|
||||
mode = SPEC_STORE_BYPASS_DISABLE;
|
||||
break;
|
||||
case SPEC_STORE_BYPASS_CMD_PRCTL:
|
||||
mode = SPEC_STORE_BYPASS_PRCTL;
|
||||
break;
|
||||
case SPEC_STORE_BYPASS_CMD_NONE:
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* We have three CPU feature flags that are in play here:
|
||||
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
|
||||
* - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
|
||||
* - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
|
||||
*/
|
||||
if (mode == SPEC_STORE_BYPASS_DISABLE) {
|
||||
setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
|
||||
/*
|
||||
* Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
|
||||
* a completely different MSR and bit dependent on family.
|
||||
*/
|
||||
switch (boot_cpu_data.x86_vendor) {
|
||||
case X86_VENDOR_INTEL:
|
||||
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
|
||||
x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
|
||||
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
|
||||
break;
|
||||
case X86_VENDOR_AMD:
|
||||
x86_amd_ssb_disable();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return mode;
|
||||
}
|
||||
|
||||
static void ssb_select_mitigation(void)
|
||||
{
|
||||
ssb_mode = __ssb_select_mitigation();
|
||||
|
||||
if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
|
||||
pr_info("%s\n", ssb_strings[ssb_mode]);
|
||||
}
|
||||
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) "Speculation prctl: " fmt
|
||||
|
||||
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
|
||||
{
|
||||
bool update;
|
||||
|
||||
if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
|
||||
ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
|
||||
return -ENXIO;
|
||||
|
||||
switch (ctrl) {
|
||||
case PR_SPEC_ENABLE:
|
||||
/* If speculation is force disabled, enable is not allowed */
|
||||
if (task_spec_ssb_force_disable(task))
|
||||
return -EPERM;
|
||||
task_clear_spec_ssb_disable(task);
|
||||
update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
|
||||
break;
|
||||
case PR_SPEC_DISABLE:
|
||||
task_set_spec_ssb_disable(task);
|
||||
update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
|
||||
break;
|
||||
case PR_SPEC_FORCE_DISABLE:
|
||||
task_set_spec_ssb_disable(task);
|
||||
task_set_spec_ssb_force_disable(task);
|
||||
update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
|
||||
break;
|
||||
default:
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
/*
|
||||
* If being set on non-current task, delay setting the CPU
|
||||
* mitigation until it is next scheduled.
|
||||
*/
|
||||
if (task == current && update)
|
||||
speculative_store_bypass_update_current();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
|
||||
unsigned long ctrl)
|
||||
{
|
||||
switch (which) {
|
||||
case PR_SPEC_STORE_BYPASS:
|
||||
return ssb_prctl_set(task, ctrl);
|
||||
default:
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SECCOMP
|
||||
void arch_seccomp_spec_mitigate(struct task_struct *task)
|
||||
{
|
||||
if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
|
||||
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int ssb_prctl_get(struct task_struct *task)
|
||||
{
|
||||
switch (ssb_mode) {
|
||||
case SPEC_STORE_BYPASS_DISABLE:
|
||||
return PR_SPEC_DISABLE;
|
||||
case SPEC_STORE_BYPASS_SECCOMP:
|
||||
case SPEC_STORE_BYPASS_PRCTL:
|
||||
if (task_spec_ssb_force_disable(task))
|
||||
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
|
||||
if (task_spec_ssb_disable(task))
|
||||
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
|
||||
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
|
||||
default:
|
||||
if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
|
||||
return PR_SPEC_ENABLE;
|
||||
return PR_SPEC_NOT_AFFECTED;
|
||||
}
|
||||
}
|
||||
|
||||
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
|
||||
{
|
||||
switch (which) {
|
||||
case PR_SPEC_STORE_BYPASS:
|
||||
return ssb_prctl_get(task);
|
||||
default:
|
||||
return -ENODEV;
|
||||
}
|
||||
}
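From userspace, the two arch hooks above are reached through prctl(). A minimal sketch of opting a task into the SSB mitigation in the "prctl" or "seccomp" modes; PR_SET_SPECULATION_CTRL and PR_GET_SPECULATION_CTRL are assumed to be the prctl commands introduced alongside these hooks in <linux/prctl.h>, while the PR_SPEC_* values are the ones handled by ssb_prctl_set()/ssb_prctl_get() above:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Ask the kernel to disable Speculative Store Bypass for this task. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	/* Read the state back; the return value encodes the PR_SPEC_* bits. */
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
	if (state < 0)
		perror("PR_GET_SPECULATION_CTRL");
	else
		printf("ssb speculation: %s\n",
		       (state & PR_SPEC_DISABLE) ? "disabled" : "enabled");
	return 0;
}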
|
||||
|
||||
void x86_spec_ctrl_setup_ap(void)
|
||||
{
|
||||
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
|
||||
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
|
||||
|
||||
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
|
||||
x86_amd_ssb_disable();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SYSFS
|
||||
|
||||
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
|
||||
char *buf, unsigned int bug)
|
||||
{
|
||||
if (!boot_cpu_has_bug(bug))
|
||||
return sprintf(buf, "Not affected\n");
|
||||
|
||||
switch (bug) {
|
||||
case X86_BUG_CPU_MELTDOWN:
|
||||
if (boot_cpu_has(X86_FEATURE_PTI))
|
||||
return sprintf(buf, "Mitigation: PTI\n");
|
||||
|
||||
break;
|
||||
|
||||
case X86_BUG_SPECTRE_V1:
|
||||
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
|
||||
|
||||
case X86_BUG_SPECTRE_V2:
|
||||
return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
|
||||
boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
|
||||
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
|
||||
spectre_v2_module_string());
|
||||
|
||||
case X86_BUG_SPEC_STORE_BYPASS:
|
||||
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return sprintf(buf, "Vulnerable\n");
|
||||
}
|
||||
|
||||
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
|
||||
return sprintf(buf, "Not affected\n");
|
||||
if (boot_cpu_has(X86_FEATURE_PTI))
|
||||
return sprintf(buf, "Mitigation: PTI\n");
|
||||
return sprintf(buf, "Vulnerable\n");
|
||||
return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
|
||||
}
|
||||
|
||||
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
|
||||
return sprintf(buf, "Not affected\n");
|
||||
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
|
||||
return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
|
||||
}
|
||||
|
||||
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
|
||||
return sprintf(buf, "Not affected\n");
|
||||
return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
|
||||
}
|
||||
|
||||
return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
|
||||
boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
|
||||
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
|
||||
spectre_v2_module_string());
|
||||
ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
|
||||
}
|
||||
#endif
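The cpu_show_* handlers above back the per-vulnerability sysfs attributes registered later in this commit (dev_attr_spec_store_bypass and friends). A short sketch of reading the reported string programmatically; the path mirrors the attribute name and is assumed rather than quoted from this hunk:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", "r");

	if (!f) {
		perror("spec_store_bypass");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. one of the ssb_strings[] values above */
	fclose(f);
	return 0;
}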
|
||||
|
|
|
@ -757,17 +757,32 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
|
|||
* and they also have a different bit for STIBP support. Also,
|
||||
* a hypervisor might have set the individual AMD bits even on
|
||||
* Intel CPUs, for finer-grained selection of what's available.
|
||||
*
|
||||
* We use the AMD bits in 0x8000_0008 EBX as the generic hardware
|
||||
* features, which are visible in /proc/cpuinfo and used by the
|
||||
* kernel. So set those accordingly from the Intel bits.
|
||||
*/
|
||||
if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
|
||||
set_cpu_cap(c, X86_FEATURE_IBRS);
|
||||
set_cpu_cap(c, X86_FEATURE_IBPB);
|
||||
set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
|
||||
}
|
||||
|
||||
if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
|
||||
set_cpu_cap(c, X86_FEATURE_STIBP);
|
||||
|
||||
if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
|
||||
cpu_has(c, X86_FEATURE_VIRT_SSBD))
|
||||
set_cpu_cap(c, X86_FEATURE_SSBD);
|
||||
|
||||
if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
|
||||
set_cpu_cap(c, X86_FEATURE_IBRS);
|
||||
set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
|
||||
}
|
||||
|
||||
if (cpu_has(c, X86_FEATURE_AMD_IBPB))
|
||||
set_cpu_cap(c, X86_FEATURE_IBPB);
|
||||
|
||||
if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
|
||||
set_cpu_cap(c, X86_FEATURE_STIBP);
|
||||
set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
|
||||
}
|
||||
}
|
||||
|
||||
void get_cpu_cap(struct cpuinfo_x86 *c)
|
||||
|
@ -927,21 +942,55 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
|
|||
{}
|
||||
};
|
||||
|
||||
static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
|
||||
static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
|
||||
{ X86_VENDOR_CENTAUR, 5, },
|
||||
{ X86_VENDOR_INTEL, 5, },
|
||||
{ X86_VENDOR_NSC, 5, },
|
||||
{ X86_VENDOR_AMD, 0x12, },
|
||||
{ X86_VENDOR_AMD, 0x11, },
|
||||
{ X86_VENDOR_AMD, 0x10, },
|
||||
{ X86_VENDOR_AMD, 0xf, },
|
||||
{ X86_VENDOR_ANY, 4, },
|
||||
{}
|
||||
};
|
||||
|
||||
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
|
||||
{
|
||||
u64 ia32_cap = 0;
|
||||
|
||||
if (x86_match_cpu(cpu_no_meltdown))
|
||||
return false;
|
||||
|
||||
if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
|
||||
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
|
||||
|
||||
if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
|
||||
!(ia32_cap & ARCH_CAP_SSB_NO))
|
||||
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
|
||||
|
||||
if (x86_match_cpu(cpu_no_speculation))
|
||||
return;
|
||||
|
||||
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
|
||||
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
|
||||
|
||||
if (x86_match_cpu(cpu_no_meltdown))
|
||||
return;
|
||||
|
||||
/* Rogue Data Cache Load? No! */
|
||||
if (ia32_cap & ARCH_CAP_RDCL_NO)
|
||||
return false;
|
||||
return;
|
||||
|
||||
return true;
|
||||
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -992,12 +1041,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
|
|||
|
||||
setup_force_cpu_cap(X86_FEATURE_ALWAYS);
|
||||
|
||||
if (!x86_match_cpu(cpu_no_speculation)) {
|
||||
if (cpu_vulnerable_to_meltdown(c))
|
||||
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
|
||||
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
|
||||
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
|
||||
}
|
||||
cpu_set_bug_bits(c);
|
||||
|
||||
fpu__init_system(c);
|
||||
|
||||
|
@ -1359,6 +1403,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
|
|||
#endif
|
||||
mtrr_ap_init();
|
||||
validate_apic_and_package_id(c);
|
||||
x86_spec_ctrl_setup_ap();
|
||||
}
|
||||
|
||||
static __init int setup_noclflush(char *arg)
|
||||
|
|
|
@ -50,4 +50,6 @@ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
|
|||
|
||||
unsigned int aperfmperf_get_khz(int cpu);
|
||||
|
||||
extern void x86_spec_ctrl_setup_ap(void);
|
||||
|
||||
#endif /* ARCH_X86_CPU_H */
|
||||
|
|
|
@ -188,7 +188,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
|
|||
setup_clear_cpu_cap(X86_FEATURE_IBPB);
|
||||
setup_clear_cpu_cap(X86_FEATURE_STIBP);
|
||||
setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
|
||||
setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
|
||||
setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
|
||||
setup_clear_cpu_cap(X86_FEATURE_SSBD);
|
||||
setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -38,6 +38,7 @@
|
|||
#include <asm/switch_to.h>
|
||||
#include <asm/desc.h>
|
||||
#include <asm/prctl.h>
|
||||
#include <asm/spec-ctrl.h>
|
||||
|
||||
/*
|
||||
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
|
||||
|
@ -278,6 +279,148 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
struct ssb_state {
|
||||
struct ssb_state *shared_state;
|
||||
raw_spinlock_t lock;
|
||||
unsigned int disable_state;
|
||||
unsigned long local_state;
|
||||
};
|
||||
|
||||
#define LSTATE_SSB 0
|
||||
|
||||
static DEFINE_PER_CPU(struct ssb_state, ssb_state);
|
||||
|
||||
void speculative_store_bypass_ht_init(void)
|
||||
{
|
||||
struct ssb_state *st = this_cpu_ptr(&ssb_state);
|
||||
unsigned int this_cpu = smp_processor_id();
|
||||
unsigned int cpu;
|
||||
|
||||
st->local_state = 0;
|
||||
|
||||
/*
|
||||
* Shared state setup happens once on the first bringup
|
||||
* of the CPU. It's not destroyed on CPU hotunplug.
|
||||
*/
|
||||
if (st->shared_state)
|
||||
return;
|
||||
|
||||
raw_spin_lock_init(&st->lock);
|
||||
|
||||
/*
|
||||
* Go over HT siblings and check whether one of them has set up the
|
||||
* shared state pointer already.
|
||||
*/
|
||||
for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
|
||||
if (cpu == this_cpu)
|
||||
continue;
|
||||
|
||||
if (!per_cpu(ssb_state, cpu).shared_state)
|
||||
continue;
|
||||
|
||||
/* Link it to the state of the sibling: */
|
||||
st->shared_state = per_cpu(ssb_state, cpu).shared_state;
|
||||
return;
|
||||
}
|
||||
|
||||
	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}

/*
 * Logic is: the first HT sibling enables SSBD for both siblings in the core,
 * and the last sibling to disable it disables it for the whole core. This is
 * how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
|
||||
{
|
||||
struct ssb_state *st = this_cpu_ptr(&ssb_state);
|
||||
u64 msr = x86_amd_ls_cfg_base;
|
||||
|
||||
if (!static_cpu_has(X86_FEATURE_ZEN)) {
|
||||
msr |= ssbd_tif_to_amd_ls_cfg(tifn);
|
||||
wrmsrl(MSR_AMD64_LS_CFG, msr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (tifn & _TIF_SSBD) {
|
||||
/*
|
||||
* Since this can race with prctl(), block reentry on the
|
||||
* same CPU.
|
||||
*/
|
||||
if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
|
||||
return;
|
||||
|
||||
msr |= x86_amd_ls_cfg_ssbd_mask;
|
||||
|
||||
raw_spin_lock(&st->shared_state->lock);
|
||||
/* First sibling enables SSBD: */
|
||||
if (!st->shared_state->disable_state)
|
||||
wrmsrl(MSR_AMD64_LS_CFG, msr);
|
||||
st->shared_state->disable_state++;
|
||||
raw_spin_unlock(&st->shared_state->lock);
|
||||
} else {
|
||||
if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
|
||||
return;
|
||||
|
||||
raw_spin_lock(&st->shared_state->lock);
|
||||
st->shared_state->disable_state--;
|
||||
if (!st->shared_state->disable_state)
|
||||
wrmsrl(MSR_AMD64_LS_CFG, msr);
|
||||
raw_spin_unlock(&st->shared_state->lock);
|
||||
}
|
||||
}
|
||||
#else
|
||||
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
|
||||
{
|
||||
u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
|
||||
|
||||
wrmsrl(MSR_AMD64_LS_CFG, msr);
|
||||
}
|
||||
#endif
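The core-wide bookkeeping in amd_set_core_ssb_state() above can be hard to follow in isolation: each sibling records its own request in local_state, and shared_state->disable_state counts how many siblings currently want SSBD, so the MSR is written only by the first sibling to enable it and by the last one to drop it. A stripped-down, single-threaded model of that counting (plain ints standing in for the per-CPU data and the MSR write; the real code additionally guards the count with a raw spinlock and uses local_state to block reentry on the same CPU):

#include <assert.h>
#include <stdbool.h>

static int disable_state;	/* shared per-core count of SSBD requests */
static bool msr_ssbd_set;	/* models the core-wide LS_CFG SSBD bit */

static void sibling_set_ssbd(bool want)
{
	if (want) {
		if (disable_state++ == 0)	/* first enabler writes the MSR */
			msr_ssbd_set = true;
	} else {
		if (--disable_state == 0)	/* last disabler clears it */
			msr_ssbd_set = false;
	}
}

int main(void)
{
	sibling_set_ssbd(true);		/* thread 0 enables, MSR written */
	sibling_set_ssbd(true);		/* thread 1 enables, MSR untouched */
	sibling_set_ssbd(false);	/* thread 1 disables, MSR still set */
	assert(msr_ssbd_set);
	sibling_set_ssbd(false);	/* thread 0 disables, MSR cleared */
	assert(!msr_ssbd_set);
	return 0;
}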
|
||||
|
||||
static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
|
||||
{
|
||||
/*
|
||||
* SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
|
||||
* so ssbd_tif_to_spec_ctrl() just works.
|
||||
*/
|
||||
wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
|
||||
}
|
||||
|
||||
static __always_inline void intel_set_ssb_state(unsigned long tifn)
|
||||
{
|
||||
u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
|
||||
|
||||
wrmsrl(MSR_IA32_SPEC_CTRL, msr);
|
||||
}
|
||||
|
||||
static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
|
||||
{
|
||||
if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
|
||||
amd_set_ssb_virt_state(tifn);
|
||||
else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
|
||||
amd_set_core_ssb_state(tifn);
|
||||
else
|
||||
intel_set_ssb_state(tifn);
|
||||
}
|
||||
|
||||
void speculative_store_bypass_update(unsigned long tif)
|
||||
{
|
||||
preempt_disable();
|
||||
__speculative_store_bypass_update(tif);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
|
||||
struct tss_struct *tss)
|
||||
{
|
||||
|
@ -309,6 +452,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
|
|||
|
||||
if ((tifp ^ tifn) & _TIF_NOCPUID)
|
||||
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
|
||||
|
||||
if ((tifp ^ tifn) & _TIF_SSBD)
|
||||
__speculative_store_bypass_update(tifn);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -79,6 +79,7 @@
|
|||
#include <asm/qspinlock.h>
|
||||
#include <asm/intel-family.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
#include <asm/spec-ctrl.h>
|
||||
|
||||
/* Number of siblings per CPU package */
|
||||
int smp_num_siblings = 1;
|
||||
|
@ -244,6 +245,8 @@ static void notrace start_secondary(void *unused)
|
|||
*/
|
||||
check_tsc_sync_target();
|
||||
|
||||
speculative_store_bypass_ht_init();
|
||||
|
||||
/*
|
||||
* Lock vector_lock, set CPU online and bring the vector
|
||||
* allocator online. Online must be set with vector_lock held
|
||||
|
@ -1292,6 +1295,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
|
|||
set_mtrr_aps_delayed_init();
|
||||
|
||||
smp_quirk_init_udelay();
|
||||
|
||||
speculative_store_bypass_ht_init();
|
||||
}
|
||||
|
||||
void arch_enable_nonboot_cpus_begin(void)
|
||||
|
|
|
@ -379,7 +379,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
|
|||
|
||||
/* cpuid 0x80000008.ebx */
|
||||
const u32 kvm_cpuid_8000_0008_ebx_x86_features =
|
||||
F(IBPB) | F(IBRS);
|
||||
F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
|
||||
|
||||
/* cpuid 0xC0000001.edx */
|
||||
const u32 kvm_cpuid_C000_0001_edx_x86_features =
|
||||
|
@ -407,7 +407,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
|
|||
|
||||
/* cpuid 7.0.edx*/
|
||||
const u32 kvm_cpuid_7_0_edx_x86_features =
|
||||
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
|
||||
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
|
||||
F(ARCH_CAPABILITIES);
|
||||
|
||||
/* all calls to cpuid_count() should be made on the same cpu */
|
||||
|
@ -647,13 +647,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
|
|||
g_phys_as = phys_as;
|
||||
entry->eax = g_phys_as | (virt_as << 8);
|
||||
entry->edx = 0;
|
||||
/* IBRS and IBPB aren't necessarily present in hardware cpuid */
|
||||
if (boot_cpu_has(X86_FEATURE_IBPB))
|
||||
entry->ebx |= F(IBPB);
|
||||
if (boot_cpu_has(X86_FEATURE_IBRS))
|
||||
entry->ebx |= F(IBRS);
|
||||
/*
|
||||
* IBRS, IBPB and VIRT_SSBD aren't necessarily present in
|
||||
* hardware cpuid
|
||||
*/
|
||||
if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
|
||||
entry->ebx |= F(AMD_IBPB);
|
||||
if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
|
||||
entry->ebx |= F(AMD_IBRS);
|
||||
if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
|
||||
entry->ebx |= F(VIRT_SSBD);
|
||||
entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
|
||||
cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
|
||||
if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
|
||||
entry->ebx |= F(VIRT_SSBD);
|
||||
break;
|
||||
}
|
||||
case 0x80000019:
|
||||
|
|
|
@ -49,7 +49,7 @@
|
|||
#include <asm/debugreg.h>
|
||||
#include <asm/kvm_para.h>
|
||||
#include <asm/irq_remapping.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
#include <asm/spec-ctrl.h>
|
||||
|
||||
#include <asm/virtext.h>
|
||||
#include "trace.h"
|
||||
|
@ -213,6 +213,12 @@ struct vcpu_svm {
|
|||
} host;
|
||||
|
||||
u64 spec_ctrl;
|
||||
/*
|
||||
* Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
|
||||
* translated into the appropriate L2_CFG bits on the host to
|
||||
* perform speculative control.
|
||||
*/
|
||||
u64 virt_spec_ctrl;
|
||||
|
||||
u32 *msrpm;
|
||||
|
||||
|
@ -2060,6 +2066,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
|
|||
|
||||
vcpu->arch.microcode_version = 0x01000065;
|
||||
svm->spec_ctrl = 0;
|
||||
svm->virt_spec_ctrl = 0;
|
||||
|
||||
if (!init_event) {
|
||||
svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
|
||||
|
@ -4108,11 +4115,18 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
break;
|
||||
case MSR_IA32_SPEC_CTRL:
|
||||
if (!msr_info->host_initiated &&
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
|
||||
return 1;
|
||||
|
||||
msr_info->data = svm->spec_ctrl;
|
||||
break;
|
||||
case MSR_AMD64_VIRT_SPEC_CTRL:
|
||||
if (!msr_info->host_initiated &&
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
|
||||
return 1;
|
||||
|
||||
msr_info->data = svm->virt_spec_ctrl;
|
||||
break;
|
||||
case MSR_F15H_IC_CFG: {
|
||||
|
||||
int family, model;
|
||||
|
@ -4203,7 +4217,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
|||
break;
|
||||
case MSR_IA32_SPEC_CTRL:
|
||||
if (!msr->host_initiated &&
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
|
||||
return 1;
|
||||
|
||||
/* The STIBP bit doesn't fault even if it's not advertised */
|
||||
|
@ -4230,7 +4244,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
|||
break;
|
||||
case MSR_IA32_PRED_CMD:
|
||||
if (!msr->host_initiated &&
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
|
||||
return 1;
|
||||
|
||||
if (data & ~PRED_CMD_IBPB)
|
||||
|
@ -4244,6 +4258,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
|||
break;
|
||||
set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
|
||||
break;
|
||||
case MSR_AMD64_VIRT_SPEC_CTRL:
|
||||
if (!msr->host_initiated &&
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
|
||||
return 1;
|
||||
|
||||
if (data & ~SPEC_CTRL_SSBD)
|
||||
return 1;
|
||||
|
||||
svm->virt_spec_ctrl = data;
|
||||
break;
|
||||
case MSR_STAR:
|
||||
svm->vmcb->save.star = data;
|
||||
break;
|
||||
|
@ -5557,8 +5581,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
|
|||
* is no need to worry about the conditional branch over the wrmsr
|
||||
* being speculatively taken.
|
||||
*/
|
||||
if (svm->spec_ctrl)
|
||||
native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
|
||||
x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
|
||||
|
||||
asm volatile (
|
||||
"push %%" _ASM_BP "; \n\t"
|
||||
|
@ -5652,6 +5675,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
|
|||
#endif
|
||||
);
|
||||
|
||||
/* Eliminate branch target predictions from guest mode */
|
||||
vmexit_fill_RSB();
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
wrmsrl(MSR_GS_BASE, svm->host.gs_base);
|
||||
#else
|
||||
loadsegment(fs, svm->host.fs);
|
||||
#ifndef CONFIG_X86_32_LAZY_GS
|
||||
loadsegment(gs, svm->host.gs);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We do not use IBRS in the kernel. If this vCPU has used the
|
||||
* SPEC_CTRL MSR it may have left it on; save the value and
|
||||
|
@ -5670,20 +5705,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
|
|||
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
|
||||
svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
|
||||
|
||||
if (svm->spec_ctrl)
|
||||
native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
|
||||
|
||||
/* Eliminate branch target predictions from guest mode */
|
||||
vmexit_fill_RSB();
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
wrmsrl(MSR_GS_BASE, svm->host.gs_base);
|
||||
#else
|
||||
loadsegment(fs, svm->host.fs);
|
||||
#ifndef CONFIG_X86_32_LAZY_GS
|
||||
loadsegment(gs, svm->host.gs);
|
||||
#endif
|
||||
#endif
|
||||
x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
|
||||
|
||||
reload_tss(vcpu);
|
||||
|
||||
|
@ -5786,7 +5808,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool svm_has_high_real_mode_segbase(void)
|
||||
static bool svm_has_emulated_msr(int index)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
@ -7012,7 +7034,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
|
|||
.hardware_enable = svm_hardware_enable,
|
||||
.hardware_disable = svm_hardware_disable,
|
||||
.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
|
||||
.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
|
||||
.has_emulated_msr = svm_has_emulated_msr,
|
||||
|
||||
.vcpu_create = svm_create_vcpu,
|
||||
.vcpu_free = svm_free_vcpu,
|
||||
|
|
|
@ -51,7 +51,7 @@
|
|||
#include <asm/apic.h>
|
||||
#include <asm/irq_remapping.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
#include <asm/spec-ctrl.h>
|
||||
#include <asm/mshyperv.h>
|
||||
|
||||
#include "trace.h"
|
||||
|
@ -3529,7 +3529,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
return kvm_get_msr_common(vcpu, msr_info);
|
||||
case MSR_IA32_SPEC_CTRL:
|
||||
if (!msr_info->host_initiated &&
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
|
||||
return 1;
|
||||
|
||||
|
@ -3648,12 +3647,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
break;
|
||||
case MSR_IA32_SPEC_CTRL:
|
||||
if (!msr_info->host_initiated &&
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
|
||||
return 1;
|
||||
|
||||
/* The STIBP bit doesn't fault even if it's not advertised */
|
||||
if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
|
||||
if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
|
||||
return 1;
|
||||
|
||||
vmx->spec_ctrl = data;
|
||||
|
@ -3679,7 +3677,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
break;
|
||||
case MSR_IA32_PRED_CMD:
|
||||
if (!msr_info->host_initiated &&
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_IBPB) &&
|
||||
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
|
||||
return 1;
|
||||
|
||||
|
@ -9488,9 +9485,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
|
|||
}
|
||||
STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
|
||||
|
||||
static bool vmx_has_high_real_mode_segbase(void)
|
||||
static bool vmx_has_emulated_msr(int index)
|
||||
{
|
||||
return enable_unrestricted_guest || emulate_invalid_guest_state;
|
||||
switch (index) {
|
||||
case MSR_IA32_SMBASE:
|
||||
/*
|
||||
* We cannot do SMM unless we can run the guest in big
|
||||
* real mode.
|
||||
*/
|
||||
return enable_unrestricted_guest || emulate_invalid_guest_state;
|
||||
case MSR_AMD64_VIRT_SPEC_CTRL:
|
||||
/* This is AMD only. */
|
||||
return false;
|
||||
default:
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
static bool vmx_mpx_supported(void)
|
||||
|
@ -9722,8 +9731,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
|||
* is no need to worry about the conditional branch over the wrmsr
|
||||
* being speculatively taken.
|
||||
*/
|
||||
if (vmx->spec_ctrl)
|
||||
native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
|
||||
x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
|
||||
|
||||
vmx->__launched = vmx->loaded_vmcs->launched;
|
||||
|
||||
|
@ -9871,8 +9879,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
|||
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
|
||||
vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
|
||||
|
||||
if (vmx->spec_ctrl)
|
||||
native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
|
||||
x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
|
||||
|
||||
/* Eliminate branch target predictions from guest mode */
|
||||
vmexit_fill_RSB();
|
||||
|
@ -12632,7 +12639,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
|
|||
.hardware_enable = hardware_enable,
|
||||
.hardware_disable = hardware_disable,
|
||||
.cpu_has_accelerated_tpr = report_flexpriority,
|
||||
.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
|
||||
.has_emulated_msr = vmx_has_emulated_msr,
|
||||
|
||||
.vm_init = vmx_vm_init,
|
||||
.vm_alloc = vmx_vm_alloc,
|
||||
|
|
|
@ -1061,6 +1061,7 @@ static u32 emulated_msrs[] = {
|
|||
MSR_SMI_COUNT,
|
||||
MSR_PLATFORM_INFO,
|
||||
MSR_MISC_FEATURES_ENABLES,
|
||||
MSR_AMD64_VIRT_SPEC_CTRL,
|
||||
};
|
||||
|
||||
static unsigned num_emulated_msrs;
|
||||
|
@ -2906,7 +2907,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
* fringe case that is not enabled except via specific settings
|
||||
* of the module parameters.
|
||||
*/
|
||||
r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
|
||||
r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
|
||||
break;
|
||||
case KVM_CAP_VAPIC:
|
||||
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
|
||||
|
@ -4606,14 +4607,8 @@ static void kvm_init_msr_list(void)
|
|||
num_msrs_to_save = j;
|
||||
|
||||
for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
|
||||
switch (emulated_msrs[i]) {
|
||||
case MSR_IA32_SMBASE:
|
||||
if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
|
||||
continue;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
|
||||
continue;
|
||||
|
||||
if (j < i)
|
||||
emulated_msrs[j] = emulated_msrs[i];
|
||||
|
|
|
@ -334,6 +334,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
|||
{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
|
||||
{ PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */
|
||||
{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
|
||||
{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
|
||||
{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
|
||||
|
|
|
@ -4493,6 +4493,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
|||
/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
|
||||
{ "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
|
||||
|
||||
/* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
|
||||
SD7SN6S256G and SD8SN8U256G */
|
||||
{ "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
|
||||
|
||||
/* devices which puke on READ_NATIVE_MAX */
|
||||
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
|
||||
{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
|
||||
|
@ -4549,13 +4553,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
|||
ATA_HORKAGE_ZERO_AFTER_TRIM |
|
||||
ATA_HORKAGE_NOLPM, },
|
||||
|
||||
/* This specific Samsung model/firmware-rev does not handle LPM well */
|
||||
/* These specific Samsung models/firmware-revs do not handle LPM well */
|
||||
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
|
||||
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
|
||||
|
||||
/* Sandisk devices which are known to not handle LPM well */
|
||||
{ "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
|
||||
|
||||
/* devices that don't properly handle queued TRIM commands */
|
||||
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
|
|
|
@ -534,14 +534,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
|
|||
return sprintf(buf, "Not affected\n");
|
||||
}
|
||||
|
||||
ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "Not affected\n");
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
|
||||
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
|
||||
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
|
||||
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
|
||||
|
||||
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
|
||||
&dev_attr_meltdown.attr,
|
||||
&dev_attr_spectre_v1.attr,
|
||||
&dev_attr_spectre_v2.attr,
|
||||
&dev_attr_spec_store_bypass.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
|
|
@ -490,7 +490,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
|
||||
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages,
|
||||
bool check_nid)
|
||||
{
|
||||
unsigned long end_pfn = start_pfn + nr_pages;
|
||||
unsigned long pfn;
|
||||
|
@ -514,7 +515,7 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
|
|||
|
||||
mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
|
||||
|
||||
ret = register_mem_sect_under_node(mem_blk, nid, true);
|
||||
ret = register_mem_sect_under_node(mem_blk, nid, check_nid);
|
||||
if (!err)
|
||||
err = ret;
|
||||
|
||||
|
|
|
@ -1923,10 +1923,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
|
|||
|
||||
dev->power.wakeup_path = false;
|
||||
|
||||
if (dev->power.no_pm_callbacks) {
|
||||
ret = 1; /* Let device go direct_complete */
|
||||
if (dev->power.no_pm_callbacks)
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (dev->pm_domain)
|
||||
callback = dev->pm_domain->ops.prepare;
|
||||
|
@ -1960,7 +1958,8 @@ unlock:
|
|||
*/
|
||||
spin_lock_irq(&dev->power.lock);
|
||||
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
|
||||
pm_runtime_suspended(dev) && ret > 0 &&
|
||||
((pm_runtime_suspended(dev) && ret > 0) ||
|
||||
dev->power.no_pm_callbacks) &&
|
||||
!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
|
||||
spin_unlock_irq(&dev->power.lock);
|
||||
return 0;
|
||||
|
|
|
@ -184,7 +184,7 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
|
|||
{
|
||||
int i;
|
||||
static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
|
||||
char interrupts[20];
|
||||
char interrupts[25];
|
||||
char *ints = interrupts;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(irq_name); i++)
|
||||
|
|
|
@ -1068,6 +1068,7 @@ static int loop_clr_fd(struct loop_device *lo)
|
|||
if (bdev) {
|
||||
bdput(bdev);
|
||||
invalidate_bdev(bdev);
|
||||
bdev->bd_inode->i_mapping->wb_err = 0;
|
||||
}
|
||||
set_capacity(lo->lo_disk, 0);
|
||||
loop_sysfs_exit(lo);
|
||||
|
|
|
@ -88,6 +88,9 @@ static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
|
|||
const struct drm_display_mode *panel_mode;
|
||||
struct drm_crtc_state *crtc_state;
|
||||
|
||||
if (!state->crtc)
|
||||
return 0;
|
||||
|
||||
if (list_empty(&connector->modes)) {
|
||||
dev_dbg(lvds->dev, "connector: empty modes list\n");
|
||||
return -EINVAL;
|
||||
|
|
|
@ -1278,8 +1278,6 @@ static void vmw_master_drop(struct drm_device *dev,
|
|||
dev_priv->active_master = &dev_priv->fbdev_master;
|
||||
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
|
||||
ttm_vt_unlock(&dev_priv->fbdev_master.lock);
|
||||
|
||||
vmw_fb_refresh(dev_priv);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1483,7 +1481,6 @@ static int vmw_pm_freeze(struct device *kdev)
|
|||
vmw_kms_resume(dev);
|
||||
if (dev_priv->enable_fb)
|
||||
vmw_fb_on(dev_priv);
|
||||
vmw_fb_refresh(dev_priv);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
|
@ -1523,8 +1520,6 @@ static int vmw_pm_restore(struct device *kdev)
|
|||
if (dev_priv->enable_fb)
|
||||
vmw_fb_on(dev_priv);
|
||||
|
||||
vmw_fb_refresh(dev_priv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -910,7 +910,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv);
|
|||
int vmw_fb_close(struct vmw_private *dev_priv);
|
||||
int vmw_fb_off(struct vmw_private *vmw_priv);
|
||||
int vmw_fb_on(struct vmw_private *vmw_priv);
|
||||
void vmw_fb_refresh(struct vmw_private *vmw_priv);
|
||||
|
||||
/**
|
||||
* Kernel modesetting - vmwgfx_kms.c
|
||||
|
|
|
@ -866,21 +866,13 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
|
|||
spin_lock_irqsave(&par->dirty.lock, flags);
|
||||
par->dirty.active = true;
|
||||
spin_unlock_irqrestore(&par->dirty.lock, flags);
|
||||
|
||||
|
||||
/*
|
||||
* Need to reschedule a dirty update, because otherwise that's
|
||||
* only done in dirty_mark() if the previous coalesced
|
||||
* dirty region was empty.
|
||||
*/
|
||||
schedule_delayed_work(&par->local_work, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* vmw_fb_refresh - Refresh fb display
|
||||
*
|
||||
* @vmw_priv: Pointer to device private
|
||||
*
|
||||
* Call into kms to show the fbdev display(s).
|
||||
*/
|
||||
void vmw_fb_refresh(struct vmw_private *vmw_priv)
|
||||
{
|
||||
if (!vmw_priv->fb_info)
|
||||
return;
|
||||
|
||||
vmw_fb_set_par(vmw_priv->fb_info);
|
||||
}
|
||||
|
|
|
@ -329,8 +329,6 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
|
|||
struct rpc_channel channel;
|
||||
char *msg, *reply = NULL;
|
||||
size_t reply_len = 0;
|
||||
int ret = 0;
|
||||
|
||||
|
||||
if (!vmw_msg_enabled)
|
||||
return -ENODEV;
|
||||
|
@ -344,15 +342,14 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
|
||||
vmw_send_msg(&channel, msg) ||
|
||||
vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
|
||||
vmw_close_channel(&channel)) {
|
||||
DRM_ERROR("Failed to get %s", guest_info_param);
|
||||
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
|
||||
goto out_open;
|
||||
|
||||
ret = -EINVAL;
|
||||
}
|
||||
if (vmw_send_msg(&channel, msg) ||
|
||||
vmw_recv_msg(&channel, (void *) &reply, &reply_len))
|
||||
goto out_msg;
|
||||
|
||||
vmw_close_channel(&channel);
|
||||
if (buffer && reply && reply_len > 0) {
|
||||
/* Remove reply code, which are the first 2 characters of
|
||||
* the reply
|
||||
|
@ -369,7 +366,17 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
|
|||
kfree(reply);
|
||||
kfree(msg);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
|
||||
out_msg:
|
||||
vmw_close_channel(&channel);
|
||||
kfree(reply);
|
||||
out_open:
|
||||
*length = 0;
|
||||
kfree(msg);
|
||||
DRM_ERROR("Failed to get %s", guest_info_param);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
|
@ -400,15 +407,22 @@ int vmw_host_log(const char *log)
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
|
||||
vmw_send_msg(&channel, msg) ||
|
||||
vmw_close_channel(&channel)) {
|
||||
DRM_ERROR("Failed to send log\n");
|
||||
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
|
||||
goto out_open;
|
||||
|
||||
ret = -EINVAL;
|
||||
}
|
||||
if (vmw_send_msg(&channel, msg))
|
||||
goto out_msg;
|
||||
|
||||
vmw_close_channel(&channel);
|
||||
kfree(msg);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
|
||||
out_msg:
|
||||
vmw_close_channel(&channel);
|
||||
out_open:
|
||||
kfree(msg);
|
||||
DRM_ERROR("Failed to send log\n");
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@ -135,17 +135,24 @@
|
|||
|
||||
#else
|
||||
|
||||
/* In the 32-bit version of this macro, we use "m" because there is no
 * more register left for bp
/*
 * In the 32-bit version of this macro, we store bp in a memory location
 * because we've run out of registers.
 * Now we can't reference that memory location while we've modified
 * %esp or %ebp, so we first push it on the stack, just before we push
 * %ebp, and then when we need it we read it from the stack where we
 * just pushed it.
 */
|
||||
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
|
||||
port_num, magic, bp, \
|
||||
eax, ebx, ecx, edx, si, di) \
|
||||
({ \
|
||||
asm volatile ("push %%ebp;" \
|
||||
"mov %12, %%ebp;" \
|
||||
asm volatile ("push %12;" \
|
||||
"push %%ebp;" \
|
||||
"mov 0x04(%%esp), %%ebp;" \
|
||||
"rep outsb;" \
|
||||
"pop %%ebp;" : \
|
||||
"pop %%ebp;" \
|
||||
"add $0x04, %%esp;" : \
|
||||
"=a"(eax), \
|
||||
"=b"(ebx), \
|
||||
"=c"(ecx), \
|
||||
|
@ -167,10 +174,12 @@
|
|||
port_num, magic, bp, \
|
||||
eax, ebx, ecx, edx, si, di) \
|
||||
({ \
|
||||
asm volatile ("push %%ebp;" \
|
||||
"mov %12, %%ebp;" \
|
||||
asm volatile ("push %12;" \
|
||||
"push %%ebp;" \
|
||||
"mov 0x04(%%esp), %%ebp;" \
|
||||
"rep insb;" \
|
||||
"pop %%ebp" : \
|
||||
"pop %%ebp;" \
|
||||
"add $0x04, %%esp;" : \
|
||||
"=a"(eax), \
|
||||
"=b"(ebx), \
|
||||
"=c"(ecx), \
|
||||
|
|
|
@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|||
umem->length = size;
|
||||
umem->address = addr;
|
||||
umem->page_shift = PAGE_SHIFT;
|
||||
umem->pid = get_task_pid(current, PIDTYPE_PID);
|
||||
/*
|
||||
* We ask for writable memory if any of the following
|
||||
* access flags are set. "Local write" and "remote write"
|
||||
|
@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|||
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
|
||||
|
||||
if (access & IB_ACCESS_ON_DEMAND) {
|
||||
put_pid(umem->pid);
|
||||
ret = ib_umem_odp_get(context, umem, access);
|
||||
if (ret) {
|
||||
kfree(umem);
|
||||
|
@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|||
|
||||
page_list = (struct page **) __get_free_page(GFP_KERNEL);
|
||||
if (!page_list) {
|
||||
put_pid(umem->pid);
|
||||
kfree(umem);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
@ -231,7 +228,6 @@ out:
|
|||
if (ret < 0) {
|
||||
if (need_release)
|
||||
__ib_umem_release(context->device, umem, 0);
|
||||
put_pid(umem->pid);
|
||||
kfree(umem);
|
||||
} else
|
||||
current->mm->pinned_vm = locked;
|
||||
|
@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem)
|
|||
|
||||
__ib_umem_release(umem->context->device, umem, 1);
|
||||
|
||||
task = get_pid_task(umem->pid, PIDTYPE_PID);
|
||||
put_pid(umem->pid);
|
||||
task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
|
||||
if (!task)
|
||||
goto out;
|
||||
mm = get_task_mm(task);
|
||||
|
|
|
@ -489,10 +489,10 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
|
|||
err_dereg_mem:
|
||||
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
|
||||
mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
|
||||
err_free_wr_wait:
|
||||
c4iw_put_wr_wait(mhp->wr_waitp);
|
||||
err_free_skb:
|
||||
kfree_skb(mhp->dereg_skb);
|
||||
err_free_wr_wait:
|
||||
c4iw_put_wr_wait(mhp->wr_waitp);
|
||||
err_free_mhp:
|
||||
kfree(mhp);
|
||||
return ERR_PTR(ret);
|
||||
|
|
|
@ -5944,6 +5944,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
|
|||
u64 status;
|
||||
u32 sw_index;
|
||||
int i = 0;
|
||||
unsigned long irq_flags;
|
||||
|
||||
sw_index = dd->hw_to_sw[hw_context];
|
||||
if (sw_index >= dd->num_send_contexts) {
|
||||
|
@ -5953,10 +5954,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
|
|||
return;
|
||||
}
|
||||
sci = &dd->send_contexts[sw_index];
|
||||
spin_lock_irqsave(&dd->sc_lock, irq_flags);
|
||||
sc = sci->sc;
|
||||
if (!sc) {
|
||||
dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
|
||||
sw_index, hw_context);
|
||||
spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -5978,6 +5981,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
|
|||
*/
|
||||
if (sc->type != SC_USER)
|
||||
queue_work(dd->pport->hfi1_wq, &sc->halt_work);
|
||||
spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
|
||||
|
||||
/*
|
||||
* Update the counters for the corresponding status bits.
|
||||
|
|
|
@ -377,6 +377,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
|
|||
|
||||
hr_cq->set_ci_db = hr_cq->db.db_record;
|
||||
*hr_cq->set_ci_db = 0;
|
||||
hr_cq->db_en = 1;
|
||||
}
|
||||
|
||||
/* Init mmt table and write buff address to mtt table */
|
||||
|
|
|
@ -722,6 +722,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
|
|||
free_mr->mr_free_pd = to_hr_pd(pd);
|
||||
free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
|
||||
free_mr->mr_free_pd->ibpd.uobject = NULL;
|
||||
free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
|
||||
atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
|
||||
|
||||
attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
|
||||
|
@ -1036,7 +1037,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
|
|||
|
||||
do {
|
||||
ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
|
||||
if (ret < 0) {
|
||||
if (ret < 0 && hr_qp) {
|
||||
dev_err(dev,
|
||||
"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
|
||||
hr_qp->qpn, ret, hr_mr->key, ne);
|
||||
|
|
|
@@ -142,8 +142,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
unsigned long flags;
unsigned int ind;
void *wqe = NULL;
u32 tmp_len = 0;
bool loopback;
u32 tmp_len;
int ret = 0;
u8 *smac;
int nreq;
@@ -189,6 +189,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,

owner_bit =
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
tmp_len = 0;

/* Corresponding to the QP type, wqe process separately */
if (ibqp->qp_type == IB_QPT_GSI) {
@@ -547,16 +548,20 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}

if (i < hr_qp->rq.max_gs) {
dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
dseg[i].addr = 0;
dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
dseg->addr = 0;
}

/* rq support inline data */
sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
for (i = 0; i < wr->num_sge; i++) {
sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
sge_list[i].len = wr->sg_list[i].length;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
(u32)wr->num_sge;
for (i = 0; i < wr->num_sge; i++) {
sge_list[i].addr =
(void *)(u64)wr->sg_list[i].addr;
sge_list[i].len = wr->sg_list[i].length;
}
}

hr_qp->rq.wrid[ind] = wr->wr_id;
@@ -613,6 +618,8 @@ static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
ring->desc_num * sizeof(struct hns_roce_cmq_desc),
DMA_BIDIRECTIONAL);

ring->desc_dma_addr = 0;
kfree(ring->desc);
}

@@ -1081,6 +1088,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
if (ret) {
dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
ret);
return ret;
}

/* Get pf resource owned by every pf */
@@ -1372,6 +1380,8 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,

roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
mr->type == MR_TYPE_MR ? 0 : 1);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
1);
mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);

mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
@@ -2169,6 +2179,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

/*
@@ -2281,7 +2292,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
qpc_mask->rq_db_record_addr = 0;

roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);

roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -4703,6 +4715,8 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);

static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
@@ -199,7 +199,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,

memset(props, 0, sizeof(*props));

props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid);
props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
props->max_mr_size = (u64)(~(0ULL));
props->page_size_cap = hr_dev->caps.page_size_cap;
props->vendor_id = hr_dev->vendor_id;
@@ -660,6 +660,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_rq_sge_list;
}
*hr_qp->rdb.db_record = 0;
hr_qp->rdb_en = 1;
}

/* Allocate QP buf */
@@ -955,7 +956,14 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}

if (cur_state == new_state && cur_state == IB_QPS_RESET) {
ret = 0;
if (hr_dev->caps.min_wqes) {
ret = -EPERM;
dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
new_state);
} else {
ret = 0;
}

goto out;
}

@@ -207,6 +207,7 @@ struct i40iw_msix_vector {
u32 irq;
u32 cpu_affinity;
u32 ceq_id;
cpumask_t mask;
};

struct l2params_work {
@@ -2093,7 +2093,7 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
if (netif_is_bond_slave(netdev))
netdev = netdev_master_upper_dev_get(netdev);

neigh = dst_neigh_lookup(dst, &dst_addr);
neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);

rcu_read_lock();
if (neigh) {
@@ -331,7 +331,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
switch (info->ae_id) {
case I40IW_AE_LLP_FIN_RECEIVED:
if (qp->term_flags)
continue;
break;
if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
@@ -360,7 +360,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
break;
case I40IW_AE_LLP_CONNECTION_RESET:
if (atomic_read(&iwqp->close_timer_started))
continue;
break;
i40iw_cm_disconn(iwqp);
break;
case I40IW_AE_QP_SUSPEND_COMPLETE:
@@ -687,7 +687,6 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
struct i40iw_msix_vector *msix_vec)
{
enum i40iw_status_code status;
cpumask_t mask;

if (iwdev->msix_shared && !ceq_id) {
tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
@@ -697,9 +696,9 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
}

cpumask_clear(&mask);
cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
irq_set_affinity_hint(msix_vec->irq, &mask);
cpumask_clear(&msix_vec->mask);
cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);

if (status) {
i40iw_pr_err("ceq irq config fail\n");
@@ -394,6 +394,7 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,

list_for_each_entry(iwpbl, pbl_list, list) {
if (iwpbl->user_base == va) {
iwpbl->on_list = false;
list_del(&iwpbl->list);
return iwpbl;
}
@@ -614,6 +615,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
return ERR_PTR(-ENOMEM);

iwqp = (struct i40iw_qp *)mem;
iwqp->allocated_buffer = mem;
qp = &iwqp->sc_qp;
qp->back_qp = (void *)iwqp;
qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
@@ -642,7 +644,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
goto error;
}

iwqp->allocated_buffer = mem;
iwqp->iwdev = iwdev;
iwqp->iwpd = iwpd;
iwqp->ibqp.qp_num = qp_num;
@@ -1898,6 +1899,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
goto error;
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
iwpbl->on_list = true;
spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
break;
case IW_MEMREG_TYPE_CQ:
@@ -1908,6 +1910,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,

spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
iwpbl->on_list = true;
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
break;
case IW_MEMREG_TYPE_MEM:
@@ -2045,14 +2048,18 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr,
switch (iwmr->type) {
case IW_MEMREG_TYPE_CQ:
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
if (!list_empty(&ucontext->cq_reg_mem_list))
if (iwpbl->on_list) {
iwpbl->on_list = false;
list_del(&iwpbl->list);
}
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
break;
case IW_MEMREG_TYPE_QP:
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
if (!list_empty(&ucontext->qp_reg_mem_list))
if (iwpbl->on_list) {
iwpbl->on_list = false;
list_del(&iwpbl->list);
}
spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
break;
default:
@@ -78,6 +78,7 @@ struct i40iw_pbl {
};

bool pbl_allocated;
bool on_list;
u64 user_base;
struct i40iw_pble_alloc pble_alloc;
struct i40iw_mr *iwmr;
@@ -2416,7 +2416,7 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
}

static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
bool inner)
{
if (inner) {
@@ -484,11 +484,6 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
return 1;
}

static int first_med_bfreg(void)
{
return 1;
}

enum {
/* this is the first blue flame register in the array of bfregs assigned
* to a processes. Since we do not use it for blue flame but rather
@@ -514,6 +509,12 @@ static int num_med_bfreg(struct mlx5_ib_dev *dev,
return n >= 0 ? n : 0;
}

static int first_med_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
}

static int first_hi_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
@@ -541,10 +542,13 @@ static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
int minidx = first_med_bfreg();
int minidx = first_med_bfreg(dev, bfregi);
int i;

for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
if (minidx < 0)
return minidx;

for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
if (bfregi->count[i] < bfregi->count[minidx])
minidx = i;
if (!bfregi->count[minidx])
@@ -401,49 +401,47 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
struct qedr_dev *dev = get_qedr_dev(context->device);
unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
u64 unmapped_db = dev->db_phys_addr;
unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
unsigned long len = (vma->vm_end - vma->vm_start);
int rc = 0;
bool found;
unsigned long dpi_start;

dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);

DP_DEBUG(dev, QEDR_MSG_INIT,
"qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
if (vma->vm_start & (PAGE_SIZE - 1)) {
DP_ERR(dev, "Vma_start not page aligned = %ld\n",
vma->vm_start);
"mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
(void *)vma->vm_start, (void *)vma->vm_end,
(void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);

if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
DP_ERR(dev,
"failed mmap, adrresses must be page aligned: start=0x%pK, end=0x%pK\n",
(void *)vma->vm_start, (void *)vma->vm_end);
return -EINVAL;
}

found = qedr_search_mmap(ucontext, vm_page, len);
if (!found) {
DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
if (!qedr_search_mmap(ucontext, phys_addr, len)) {
DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
vma->vm_pgoff);
return -EINVAL;
}

DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");

if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
dev->db_size))) {
DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
if (vma->vm_flags & VM_READ) {
DP_ERR(dev, "Trying to map doorbell bar for read\n");
return -EPERM;
}

vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
PAGE_SIZE, vma->vm_page_prot);
} else {
DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
rc = remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff, len, vma->vm_page_prot);
if (phys_addr < dpi_start ||
((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
DP_ERR(dev,
"failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
(void *)phys_addr, (void *)dpi_start,
ucontext->dpi_size);
return -EINVAL;
}
DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
return rc;

if (vma->vm_flags & VM_READ) {
DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
return -EINVAL;
}

vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
vma->vm_page_prot);
}

struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
@@ -761,7 +761,6 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
unsigned int mask;
unsigned int length = 0;
int i;
int must_sched;

while (wr) {
mask = wr_opcode_mask(wr->opcode, qp);
@@ -791,14 +790,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
wr = wr->next;
}

/*
* Must sched in case of GSI QP because ib_send_mad() hold irq lock,
* and the requester call ip_local_out_sk() that takes spin_lock_bh.
*/
must_sched = (qp_type(qp) == IB_QPT_GSI) ||
(queue_count(qp->sq.queue) > 1);

rxe_run_task(&qp->req.task, must_sched);
rxe_run_task(&qp->req.task, 1);
if (unlikely(qp->req.state == QP_STATE_ERROR))
rxe_run_task(&qp->comp.task, 1);

@@ -1,6 +1,6 @@
config INFINIBAND_SRPT
tristate "InfiniBand SCSI RDMA Protocol target support"
depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
---help---

Support for the SCSI RDMA Protocol (SRP) Target driver. The
@@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void)
** Receive and process command from user mode utility
*/
void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
int length,
int length, void *mptr,
divas_xdi_copy_from_user_fn_t cp_fn)
{
diva_xdi_um_cfg_cmd_t msg;
diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
diva_os_xdi_adapter_t *a = NULL;
diva_os_spin_lock_magic_t old_irql;
struct list_head *tmp;
@@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
length, sizeof(diva_xdi_um_cfg_cmd_t)))
return NULL;
}
if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) {
if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
DBG_ERR(("A: A(?) open, write error"))
return NULL;
}
diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
list_for_each(tmp, &adapter_queue) {
a = list_entry(tmp, diva_os_xdi_adapter_t, link);
if (a->controller == (int)msg.adapter)
if (a->controller == (int)msg->adapter)
break;
a = NULL;
}
diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");

if (!a) {
DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter))
DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
}

return (a);
@@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)

int
diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
int length, divas_xdi_copy_from_user_fn_t cp_fn)
int length, void *mptr,
divas_xdi_copy_from_user_fn_t cp_fn)
{
diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
void *data;

@@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
return (-2);
}

length = (*cp_fn) (os_handle, data, src, length);
if (msg) {
*(diva_xdi_um_cfg_cmd_t *)data = *msg;
length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
src + sizeof(*msg), length - sizeof(*msg));
} else {
length = (*cp_fn) (os_handle, data, src, length);
}
if (length > 0) {
if ((*(a->interface.cmd_proc))
(a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
@@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
int max_length, divas_xdi_copy_to_user_fn_t cp_fn);

int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
int length, divas_xdi_copy_from_user_fn_t cp_fn);
int length, void *msg,
divas_xdi_copy_from_user_fn_t cp_fn);

void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
int length,
int length, void *msg,
divas_xdi_copy_from_user_fn_t cp_fn);

void diva_xdi_close_adapter(void *adapter, void *os_handle);
@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
static ssize_t divas_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
diva_xdi_um_cfg_cmd_t msg;
int ret = -EINVAL;

if (!file->private_data) {
file->private_data = diva_xdi_open_adapter(file, buf,
count,
count, &msg,
xdi_copy_from_user);
}
if (!file->private_data) {
return (-ENODEV);
if (!file->private_data)
return (-ENODEV);
ret = diva_xdi_write(file->private_data, file,
buf, count, &msg, xdi_copy_from_user);
} else {
ret = diva_xdi_write(file->private_data, file,
buf, count, NULL, xdi_copy_from_user);
}

ret = diva_xdi_write(file->private_data, file,
buf, count, xdi_copy_from_user);
switch (ret) {
case -1: /* Message should be removed from rx mailbox first */
ret = -EBUSY;
@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
static ssize_t divas_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
diva_xdi_um_cfg_cmd_t msg;
int ret = -EINVAL;

if (!file->private_data) {
file->private_data = diva_xdi_open_adapter(file, buf,
count,
count, &msg,
xdi_copy_from_user);
}
if (!file->private_data) {
@@ -419,10 +419,25 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
/* Verify that EC can process command */
for (i = 0; i < len; i++) {
rx_byte = rx_buf[i];
/*
* Seeing the PAST_END, RX_BAD_DATA, or NOT_READY
* markers are all signs that the EC didn't fully
* receive our command. e.g., if the EC is flashing
* itself, it can't respond to any commands and instead
* clocks out EC_SPI_PAST_END from its SPI hardware
* buffer. Similar occurrences can happen if the AP is
* too slow to clock out data after asserting CS -- the
* EC will abort and fill its buffer with
* EC_SPI_RX_BAD_DATA.
*
* In all cases, these errors should be safe to retry.
* Report -EAGAIN and let the caller decide what to do
* about that.
*/
if (rx_byte == EC_SPI_PAST_END ||
rx_byte == EC_SPI_RX_BAD_DATA ||
rx_byte == EC_SPI_NOT_READY) {
ret = -EREMOTEIO;
ret = -EAGAIN;
break;
}
}
@@ -431,7 +446,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
if (!ret)
ret = cros_ec_spi_receive_packet(ec_dev,
ec_msg->insize + sizeof(*response));
else
else if (ret != -EAGAIN)
dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);

final_ret = terminate_request(ec_dev);
@@ -537,10 +552,11 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
/* Verify that EC can process command */
for (i = 0; i < len; i++) {
rx_byte = rx_buf[i];
/* See comments in cros_ec_pkt_xfer_spi() */
if (rx_byte == EC_SPI_PAST_END ||
rx_byte == EC_SPI_RX_BAD_DATA ||
rx_byte == EC_SPI_NOT_READY) {
ret = -EREMOTEIO;
ret = -EAGAIN;
break;
}
}
@@ -549,7 +565,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
if (!ret)
ret = cros_ec_spi_receive_response(ec_dev,
ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
else
else if (ret != -EAGAIN)
dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);

final_ret = terminate_request(ec_dev);
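The cros_ec hunks above downgrade the EC_SPI_PAST_END / EC_SPI_RX_BAD_DATA / EC_SPI_NOT_READY case from -EREMOTEIO to -EAGAIN so the failure becomes retryable. A minimal stand-alone C sketch of the bounded retry policy a caller could apply; do_transfer() and the retry limit are hypothetical stand-ins, not part of the driver:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for a cros_ec transfer; pretends the EC is busy twice. */
static int do_transfer(int attempt)
{
        return (attempt < 2) ? -EAGAIN : 0;
}

int main(void)
{
        int ret = -EAGAIN;

        /* -EAGAIN means "safe to retry"; give up after a bounded number of tries. */
        for (int attempt = 0; attempt < 5 && ret == -EAGAIN; attempt++)
                ret = do_transfer(attempt);

        printf("final result: %d\n", ret);
        return ret ? 1 : 0;
}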
@@ -2485,7 +2485,7 @@ static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
break;
}

return 0;
return ret;
}

#ifdef CONFIG_COMPAT
@@ -33,6 +33,8 @@ struct sdhci_iproc_host {
const struct sdhci_iproc_data *data;
u32 shadow_cmd;
u32 shadow_blk;
bool is_cmd_shadowed;
bool is_blk_shadowed;
};

#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
@@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)

static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
{
u32 val = sdhci_iproc_readl(host, (reg & ~3));
u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
u32 val;
u16 word;

if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
/* Get the saved transfer mode */
val = iproc_host->shadow_cmd;
} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
iproc_host->is_blk_shadowed) {
/* Get the saved block info */
val = iproc_host->shadow_blk;
} else {
val = sdhci_iproc_readl(host, (reg & ~3));
}
word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
return word;
}

@@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)

if (reg == SDHCI_COMMAND) {
/* Write the block now as we are issuing a command */
if (iproc_host->shadow_blk != 0) {
if (iproc_host->is_blk_shadowed) {
sdhci_iproc_writel(host, iproc_host->shadow_blk,
SDHCI_BLOCK_SIZE);
iproc_host->shadow_blk = 0;
iproc_host->is_blk_shadowed = false;
}
oldval = iproc_host->shadow_cmd;
} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
iproc_host->is_cmd_shadowed = false;
} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
iproc_host->is_blk_shadowed) {
/* Block size and count are stored in shadow reg */
oldval = iproc_host->shadow_blk;
} else {
@@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
if (reg == SDHCI_TRANSFER_MODE) {
/* Save the transfer mode until the command is issued */
iproc_host->shadow_cmd = newval;
iproc_host->is_cmd_shadowed = true;
} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
/* Save the block info until the command is issued */
iproc_host->shadow_blk = newval;
iproc_host->is_blk_shadowed = true;
} else {
/* Command or other regular 32-bit write */
sdhci_iproc_writel(host, newval, reg & ~3);
@@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {

static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
.ops = &sdhci_iproc_32only_ops,
};

@@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = {
.caps1 = SDHCI_DRIVER_TYPE_C |
SDHCI_DRIVER_TYPE_D |
SDHCI_SUPPORT_DDR50,
.mmc_caps = MMC_CAP_1_8V_DDR,
};

static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
@@ -1552,22 +1552,26 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!ioaddr) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("card has no PCI IO resources, aborting\n");
return -ENODEV;
err = -ENODEV;
goto err_disable_dev;
}

err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
if (err) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("architecture does not support 32bit PCI busmaster DMA\n");
return err;
goto err_disable_dev;
}
if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
if (pcnet32_debug & NETIF_MSG_PROBE)
pr_err("io address range already allocated\n");
return -EBUSY;
err = -EBUSY;
goto err_disable_dev;
}

err = pcnet32_probe1(ioaddr, 1, pdev);

err_disable_dev:
if (err < 0)
pci_disable_device(pdev);

@@ -2747,11 +2747,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);

/* Query PCI controller on system for DMA addressing
* limitation for the device. Try 64-bit first, and
* limitation for the device. Try 47-bit first, and
* fail to 32-bit.
*/

err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
@@ -2765,10 +2765,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_release_regions;
}
} else {
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
"for consistent allocations, aborting\n", 64);
"for consistent allocations, aborting\n", 47);
goto err_out_release_regions;
}
using_dac = 1;
@@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
@@ -1,20 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Fast Ethernet Controller (ENET) PTP driver for MX6x.
*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -807,9 +807,11 @@ static int ibmvnic_login(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
int retry_count = 0;
bool retry;
int rc;

do {
retry = false;
if (retry_count > IBMVNIC_MAX_QUEUES) {
netdev_warn(netdev, "Login attempts exceeded\n");
return -1;
@@ -833,6 +835,9 @@ static int ibmvnic_login(struct net_device *netdev)
retry_count++;
release_sub_crqs(adapter, 1);

retry = true;
netdev_dbg(netdev,
"Received partial success, retrying...\n");
adapter->init_done_rc = 0;
reinit_completion(&adapter->init_done);
send_cap_queries(adapter);
@@ -860,7 +865,7 @@ static int ibmvnic_login(struct net_device *netdev)
netdev_warn(netdev, "Adapter login failed\n");
return -1;
}
} while (adapter->init_done_rc == PARTIALSUCCESS);
} while (retry);

/* handle pending MAC address changes after successful login */
if (adapter->mac_change_pending) {
@@ -2736,18 +2741,21 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
{
struct device *dev = &adapter->vdev->dev;
unsigned long rc;
u64 val;

if (scrq->hw_irq > 0x100000000ULL) {
dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
return 1;
}

val = (0xff000000) | scrq->hw_irq;
rc = plpar_hcall_norets(H_EOI, val);
if (rc)
dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
val, rc);
if (adapter->resetting &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
u64 val = (0xff000000) | scrq->hw_irq;

rc = plpar_hcall_norets(H_EOI, val);
if (rc)
dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
val, rc);
}

rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
@@ -43,12 +43,12 @@
#include "fw.h"

/*
* We allocate in as big chunks as we can, up to a maximum of 256 KB
* per chunk.
* We allocate in page size (default 4KB on many archs) chunks to avoid high
* order memory allocations in fragmented/high usage memory situation.
*/
enum {
MLX4_ICM_ALLOC_SIZE = 1 << 18,
MLX4_TABLE_CHUNK_SIZE = 1 << 18
MLX4_ICM_ALLOC_SIZE = PAGE_SIZE,
MLX4_TABLE_CHUNK_SIZE = PAGE_SIZE,
};

static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -398,9 +398,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 size;

obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
if (WARN_ON(!obj_per_chunk))
return -EINVAL;
num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

table->icm = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
table->icm = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL);
if (!table->icm)
return -ENOMEM;
table->virt = virt;
@@ -446,7 +448,7 @@ err:
mlx4_free_icm(dev, table->icm[i], use_coherent);
}

kfree(table->icm);
kvfree(table->icm);

return -ENOMEM;
}
@@ -462,5 +464,5 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
mlx4_free_icm(dev, table->icm[i], table->coherent);
}

kfree(table->icm);
kvfree(table->icm);
}
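The mlx4 ICM hunks above shrink the table chunk size to PAGE_SIZE and add a WARN_ON guard against a zero obj_per_chunk. A small stand-alone sketch of the same sizing arithmetic, with assumed example values (4 KB chunks, a 512-byte object), showing why the divide-by-zero guard matters once the chunk is no longer a generous 256 KB:

#include <stdio.h>

#define CHUNK_SIZE 4096u   /* stand-in for MLX4_TABLE_CHUNK_SIZE == PAGE_SIZE */

/* Mirror of the sizing logic: objects per chunk, then number of chunks. */
static long num_chunks(unsigned int nobj, unsigned int obj_size)
{
        unsigned int obj_per_chunk = CHUNK_SIZE / obj_size;

        if (!obj_per_chunk)     /* obj_size > CHUNK_SIZE: division below would trap */
                return -1;      /* the driver returns -EINVAL at this point */
        return (nobj + obj_per_chunk - 1) / obj_per_chunk;   /* round up */
}

int main(void)
{
        printf("%ld\n", num_chunks(1000, 512));    /* 8 objects per 4 KB chunk -> 125 chunks */
        printf("%ld\n", num_chunks(1000, 8192));   /* object larger than a chunk -> -1 */
        return 0;
}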
@@ -172,7 +172,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
list_add_tail(&dev_ctx->list, &priv->ctx_list);
spin_unlock_irqrestore(&priv->ctx_lock, flags);

mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n",
mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
dev_ctx->intf->protocol, enable ?
"enabled" : "disabled");
}
@@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
struct mlx4_qp *qp;

spin_lock(&qp_table->lock);
spin_lock_irq(&qp_table->lock);

qp = __mlx4_qp_lookup(dev, qpn);

spin_unlock(&qp_table->lock);
spin_unlock_irq(&qp_table->lock);
return qp;
}

@@ -627,6 +627,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}

static __be32 mlx5e_get_fcs(struct sk_buff *skb)
{
int last_frag_sz, bytes_in_prev, nr_frags;
u8 *fcs_p1, *fcs_p2;
skb_frag_t *last_frag;
__be32 fcs_bytes;

if (!skb_is_nonlinear(skb))
return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);

nr_frags = skb_shinfo(skb)->nr_frags;
last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
last_frag_sz = skb_frag_size(last_frag);

/* If all FCS data is in last frag */
if (last_frag_sz >= ETH_FCS_LEN)
return *(__be32 *)(skb_frag_address(last_frag) +
last_frag_sz - ETH_FCS_LEN);

fcs_p2 = (u8 *)skb_frag_address(last_frag);
bytes_in_prev = ETH_FCS_LEN - last_frag_sz;

/* Find where the other part of the FCS is - Linear or another frag */
if (nr_frags == 1) {
fcs_p1 = skb_tail_pointer(skb);
} else {
skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];

fcs_p1 = skb_frag_address(prev_frag) +
skb_frag_size(prev_frag);
}
fcs_p1 -= bytes_in_prev;

memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);

return fcs_bytes;
}

static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
@@ -655,6 +694,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
skb->csum = csum_partial(skb->data + ETH_HLEN,
network_depth - ETH_HLEN,
skb->csum);
if (unlikely(netdev->features & NETIF_F_RXFCS))
skb->csum = csum_add(skb->csum,
(__force __wsum)mlx5e_get_fcs(skb));
rq->stats.csum_complete++;
return;
}
@@ -234,19 +234,17 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
context->buf.sg[0].data = &context->command;

spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
if (!res)
list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
if (res) {
mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
res);
spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
list_del(&context->list);
spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
kfree(context);
return ERR_PTR(res);
}

/* Context will be freed by wait func after completion */
return context;
}
@@ -77,7 +77,7 @@
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
#define ILT_ENTRY_PHY_ADDR_SHIFT 0
#define ILT_ENTRY_VALID_MASK 0x1ULL
#define ILT_ENTRY_VALID_SHIFT 52
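The old ILT_ENTRY_PHY_ADDR_MASK constant covers only 44 address bits, while the replacement (~0ULL >> 12) covers 52 bits, matching the valid bit at shift 52. A quick stand-alone check of the two mask widths (values taken from the hunk above):

#include <stdio.h>

int main(void)
{
        unsigned long long old_mask = 0x000FFFFFFFFFFFULL;  /* 44 one-bits */
        unsigned long long new_mask = ~0ULL >> 12;           /* 52 one-bits */

        printf("old: %016llx\n", old_mask);
        printf("new: %016llx\n", new_mask);
        /* Any physical page address with bits 44..51 set would have been
         * silently truncated by the old mask. */
        return 0;
}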
@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
return rc;

/* make rcal=100, since rdb default is 000 */
rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10);
rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
if (rc < 0)
return rc;

/* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10);
rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
if (rc < 0)
return rc;

/* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00);
rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);

return 0;
}
@@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
/* The register must be written to both the Shadow Register Select and
* the Shadow Read Register Selector
*/
phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum |
phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
return phy_read(phydev, MII_BCM54XX_AUX_CTL);
}
@@ -14,11 +14,18 @@
#ifndef _LINUX_BCM_PHY_LIB_H
#define _LINUX_BCM_PHY_LIB_H

#include <linux/brcmphy.h>
#include <linux/phy.h>

int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);

static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
u16 reg, u16 val)
{
return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
}

int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);

@@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv {
static void r_rc_cal_reset(struct phy_device *phydev)
{
/* Reset R_CAL/RC_CAL Engine */
bcm_phy_write_exp(phydev, 0x00b0, 0x0010);
bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);

/* Disable Reset R_AL/RC_CAL Engine */
bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
}

static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
@@ -605,30 +605,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)

if (cmd == PPPIOCDETACH) {
/*
* We have to be careful here... if the file descriptor
* has been dup'd, we could have another process in the
* middle of a poll using the same file *, so we had
* better not free the interface data structures -
* instead we fail the ioctl. Even in this case, we
* shut down the interface if we are the owner of it.
* Actually, we should get rid of PPPIOCDETACH, userland
* (i.e. pppd) could achieve the same effect by closing
* this fd and reopening /dev/ppp.
* PPPIOCDETACH is no longer supported as it was heavily broken,
* and is only known to have been used by pppd older than
* ppp-2.4.2 (released November 2003).
*/
pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
current->comm, current->pid);
err = -EINVAL;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
rtnl_lock();
if (file == ppp->owner)
unregister_netdevice(ppp->dev);
rtnl_unlock();
}
if (atomic_long_read(&file->f_count) < 2) {
ppp_release(NULL, file);
err = 0;
} else
pr_warn("PPPIOCDETACH file->f_count=%ld\n",
atomic_long_read(&file->f_count));
goto out;
}

@@ -1436,6 +1436,13 @@ static void tun_net_init(struct net_device *dev)
dev->max_mtu = MAX_MTU - dev->hard_header_len;
}

static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
struct sock *sk = tfile->socket.sk;

return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}

/* Character device part */

/* Poll */
@@ -1458,10 +1465,14 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
if (!ptr_ring_empty(&tfile->tx_ring))
mask |= EPOLLIN | EPOLLRDNORM;

if (tun->dev->flags & IFF_UP &&
(sock_writeable(sk) ||
(!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
sock_writeable(sk))))
/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
* guarantee EPOLLOUT to be raised by either here or
* tun_sock_write_space(). Then process could get notification
* after it writes to a down device and meets -EIO.
*/
if (tun_sock_writeable(tun, tfile) ||
(!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
tun_sock_writeable(tun, tfile)))
mask |= EPOLLOUT | EPOLLWRNORM;

if (tun->dev->reg_state != NETREG_REGISTERED)
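The tun_chr_poll() hunk above re-checks writability after setting SOCKWQ_ASYNC_NOSPACE so a wakeup racing with the flag update is not lost. A stand-alone sketch of the same check / arm-flag / re-check ordering using C11 atomics; the names and single-threaded driver in main() are illustrative, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool need_wakeup;   /* plays the role of SOCKWQ_ASYNC_NOSPACE */
static atomic_int  free_space;

/* Poller side: only report "not writable" after arming the wakeup flag
 * and confirming space is still unavailable. */
static bool poll_writable(void)
{
        if (atomic_load(&free_space) > 0)
                return true;
        atomic_store(&need_wakeup, true);
        return atomic_load(&free_space) > 0;   /* re-check closes the race window */
}

/* Consumer side: publish the free space first, then deliver the wakeup
 * if the poller armed the flag. */
static void release_space(void)
{
        atomic_fetch_add(&free_space, 1);
        if (atomic_exchange(&need_wakeup, false))
                printf("wake up poller\n");
}

int main(void)
{
        printf("writable: %d\n", poll_writable());   /* 0: no space, flag armed */
        release_space();                             /* prints the wakeup */
        printf("writable: %d\n", poll_writable());   /* 1: space available */
        return 0;
}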
Some files were not shown because too many files have changed in this diff.