Merge 5.12-rc3 into staging-next
We need the staging fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit b828324bba
@@ -23,6 +23,7 @@ properties:
- enum:
- ingenic,jz4775-intc
- ingenic,jz4770-intc
- ingenic,jz4760b-intc
- const: ingenic,jz4760-intc
- items:
- const: ingenic,x1000-intc
@@ -613,6 +613,27 @@ Some of these date from the very introduction of KMS in 2008 ...

Level: Intermediate

Remove automatic page mapping from dma-buf importing
----------------------------------------------------

When importing dma-bufs, the dma-buf and PRIME frameworks automatically map
imported pages into the importer's DMA area. drm_gem_prime_fd_to_handle() and
drm_gem_prime_handle_to_fd() require that importers call dma_buf_attach()
even if they never do actual device DMA, but only CPU access through
dma_buf_vmap(). This is a problem for USB devices, which do not support DMA
operations.

To fix the issue, automatic page mappings should be removed from the
buffer-sharing code. Fixing this is a bit more involved, since the import/export
cache is also tied to &drm_gem_object.import_attach. Meanwhile we paper over
this problem for USB devices by fishing out the USB host controller device, as
long as that supports DMA. Otherwise importing can still needlessly fail.

Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter

Level: Advanced

Better Testing
==============
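[Editor's note] The TODO entry above describes the attachment requirement only in prose; a minimal sketch of the pattern it wants to eliminate, assuming a hypothetical importer and the 5.12-era dma-buf API, where dma_buf_attach()/dma_buf_detach() exist solely to satisfy the framework even though only CPU access is needed::

    #include <linux/dma-buf.h>

    /* Hypothetical importer that only needs CPU access. */
    static int example_import_for_cpu(struct dma_buf *buf, struct device *dev)
    {
            struct dma_buf_attachment *att;
            struct dma_buf_map map;
            int ret;

            att = dma_buf_attach(buf, dev); /* required today, no DMA is done */
            if (IS_ERR(att))
                    return PTR_ERR(att);

            ret = dma_buf_vmap(buf, &map); /* the access we actually want */
            if (!ret) {
                    /* ... read/write map.vaddr ... */
                    dma_buf_vunmap(buf, &map);
            }
            dma_buf_detach(buf, att);
            return ret;
    }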
@@ -1988,7 +1988,7 @@ netif_carrier.

If use_carrier is 0, then the MII monitor will first query the
device's (via ioctl) MII registers and check the link state. If that
request fails (not just that it returns carrier down), then the MII
monitor will make an ethtool ETHOOL_GLINK request to attempt to obtain
monitor will make an ethtool ETHTOOL_GLINK request to attempt to obtain
the same information. If both methods fail (i.e., the driver either
does not support or had some error in processing both the MII register
and ethtool requests), then the MII monitor will assume the link is
@@ -142,73 +142,13 @@ Please send incremental versions on top of what has been merged in order to fix
the patches the way they would look like if your latest patch series was to be
merged.

How can I tell what patches are queued up for backporting to the various stable releases?
-----------------------------------------------------------------------------------------
Normally Greg Kroah-Hartman collects stable commits himself, but for
networking, Dave collects up patches he deems critical for the
networking subsystem, and then hands them off to Greg.

There is a patchworks queue that you can see here:

https://patchwork.kernel.org/bundle/netdev/stable/?state=*

It contains the patches which Dave has selected, but not yet handed off
to Greg. If Greg already has the patch, then it will be here:

https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git

A quick way to find whether the patch is in this stable-queue is to
simply clone the repo, and then git grep the mainline commit ID, e.g.
::

stable-queue$ git grep -l 284041ef21fdf2e
releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
stable/stable-queue$

I see a network patch and I think it should be backported to stable. Should I request it via stable@vger.kernel.org like the references in the kernel's Documentation/process/stable-kernel-rules.rst file say?
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
No, not for networking. Check the stable queues as per above first
to see if it is already queued. If not, then send a mail to netdev,
listing the upstream commit ID and why you think it should be a stable
candidate.

Before you jump to go do the above, do note that the normal stable rules
in :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
still apply. So you need to explicitly indicate why it is a critical
fix and exactly what users are impacted. In addition, you need to
convince yourself that you *really* think it has been overlooked,
vs. having been considered and rejected.

Generally speaking, the longer it has had a chance to "soak" in
mainline, the better the odds that it is an OK candidate for stable. So
scrambling to request a commit be added the day after it appears should
be avoided.

I have created a network patch and I think it should be backported to stable. Should I add a Cc: stable@vger.kernel.org like the references in the kernel's Documentation/ directory say?
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
No. See above answer. In short, if you think it really belongs in
stable, then ensure you write a decent commit log that describes who
gets impacted by the bug fix and how it manifests itself, and when the
bug was introduced. If you do that properly, then the commit will get
handled appropriately and most likely get put in the patchworks stable
queue if it really warrants it.

If you think there is some valid information relating to it being in
stable that does *not* belong in the commit log, then use the three dash
marker line as described in
:ref:`Documentation/process/submitting-patches.rst <the_canonical_patch_format>`
to temporarily embed that information into the patch that you send.

Are all networking bug fixes backported to all stable releases?
Are there special rules regarding stable submissions on netdev?
---------------------------------------------------------------
Due to capacity, Dave could only take care of the backports for the
last two stable releases. For earlier stable releases, each stable
branch maintainer is supposed to take care of them. If you find any
patch is missing from an earlier stable branch, please notify
stable@vger.kernel.org with either a commit ID or a formal patch
backported, and CC Dave and other relevant networking developers.
While it used to be the case that netdev submissions were not supposed
to carry explicit ``CC: stable@vger.kernel.org`` tags that is no longer
the case today. Please follow the standard stable rules in
:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`,
and make sure you include appropriate Fixes tags!

Is the comment style convention different for the networking content?
---------------------------------------------------------------------
@@ -35,12 +35,6 @@ Rules on what kind of patches are accepted, and which ones are not, into the
Procedure for submitting patches to the -stable tree
----------------------------------------------------

- If the patch covers files in net/ or drivers/net please follow netdev stable
submission guidelines as described in
:ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
after first checking the stable networking queue at
https://patchwork.kernel.org/bundle/netdev/stable/?state=*
to ensure the requested patch is not already queued up.
- Security patches should not be handled (solely) by the -stable review
process but should follow the procedures in
:ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
@@ -250,11 +250,6 @@ should also read
:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
in addition to this file.

Note, however, that some subsystem maintainers want to come to their own
conclusions on which patches should go to the stable trees. The networking
maintainer, in particular, would rather not see individual developers
adding lines like the above to their patches.

If changes affect userland-kernel interfaces, please send the MAN-PAGES
maintainer (as listed in the MAINTAINERS file) a man-pages patch, or at
least a notification of the change, so that some information makes its way
@@ -182,6 +182,9 @@ is dependent on the CPU capability and the kernel configuration. The limit can
be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION
ioctl() at run-time.

Creation of the VM will fail if the requested IPA size (whether it is
implicit or explicit) is unsupported on the host.

Please note that configuring the IPA size does not affect the capability
exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects
size of the address translated by the stage2 level (guest physical to
MAINTAINERS | 11
@@ -261,8 +261,8 @@ ABI/API
L: linux-api@vger.kernel.org
F: include/linux/syscalls.h
F: kernel/sys_ni.c
F: include/uapi/
F: arch/*/include/uapi/
X: include/uapi/
X: arch/*/include/uapi/

ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
M: Hans de Goede <hdegoede@redhat.com>
@@ -5835,7 +5835,7 @@ M: David Airlie <airlied@linux.ie>
M: Daniel Vetter <daniel@ffwll.ch>
L: dri-devel@lists.freedesktop.org
S: Maintained
B: https://bugs.freedesktop.org/
B: https://gitlab.freedesktop.org/drm
C: irc://chat.freenode.net/dri-devel
T: git git://anongit.freedesktop.org/drm/drm
F: Documentation/devicetree/bindings/display/
@@ -10716,7 +10716,8 @@ F: drivers/net/ethernet/marvell/mvpp2/

MARVELL MWIFIEX WIRELESS DRIVER
M: Amitkumar Karwar <amitkarwar@gmail.com>
M: Ganapathi Bhat <ganapathi.bhat@nxp.com>
M: Ganapathi Bhat <ganapathi017@gmail.com>
M: Sharvari Harisangam <sharvari.harisangam@nxp.com>
M: Xinming Hu <huxinming820@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
@@ -19165,7 +19166,7 @@ S: Maintained
F: drivers/infiniband/hw/vmw_pvrdma/

VMware PVSCSI driver
M: Jim Gill <jgill@vmware.com>
M: Vishal Bhakta <vbhakta@vmware.com>
M: VMware PV-Drivers <pv-drivers@vmware.com>
L: linux-scsi@vger.kernel.org
S: Maintained
Makefile | 6
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 12
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Frozen Wasteland

# *DOCUMENTATION*
@@ -264,7 +264,8 @@ no-dot-config-targets := $(clean-targets) \
$(version_h) headers headers_% archheaders archscripts \
%asm-generic kernelversion %src-pkg dt_binding_check \
outputmakefile
no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease
no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
image_name
single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/

config-build :=
@@ -478,6 +479,7 @@ USERINCLUDE := \
-I$(objtree)/arch/$(SRCARCH)/include/generated/uapi \
-I$(srctree)/include/uapi \
-I$(objtree)/include/generated/uapi \
-include $(srctree)/include/linux/compiler-version.h \
-include $(srctree)/include/linux/kconfig.h

# Use LINUXINCLUDE when you must reference the include/ directory.
@@ -632,13 +632,12 @@ config HAS_LTO_CLANG
def_bool y
# Clang >= 11: https://github.com/ClangBuiltLinux/linux/issues/510
depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD
depends on $(success,test $(LLVM) -eq 1)
depends on $(success,test $(LLVM_IAS) -eq 1)
depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm)
depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
depends on ARCH_SUPPORTS_LTO_CLANG
depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT
depends on !KASAN
depends on !KASAN || KASAN_HW_TAGS
depends on !GCOV_KERNEL
help
The compiler and Kconfig options support building with Clang's
@@ -348,6 +348,7 @@ config ARCH_EP93XX
select ARM_AMBA
imply ARM_PATCH_PHYS_VIRT
select ARM_VIC
select GENERIC_IRQ_MULTI_HANDLER
select AUTO_ZRELADDR
select CLKDEV_LOOKUP
select CLKSRC_MMIO
@@ -11,6 +11,7 @@

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

@@ -109,7 +110,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
map_ops[i].status = GNTST_general_error;
unmap.host_addr = map_ops[i].host_addr,
unmap.handle = map_ops[i].handle;
map_ops[i].handle = ~0;
map_ops[i].handle = INVALID_GRANT_HANDLE;
if (map_ops[i].flags & GNTMAP_device_map)
unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
else

@@ -130,7 +131,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,

return 0;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);

int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_unmap_grant_ref *kunmap_ops,

@@ -145,7 +145,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,

return 0;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);

bool __set_phys_to_machine_multi(unsigned long pfn,
unsigned long mfn, unsigned long nr_pages)
@@ -1055,8 +1055,6 @@ config HW_PERF_EVENTS
config SYS_SUPPORTS_HUGETLBFS
def_bool y

config ARCH_WANT_HUGE_PMD_SHARE

config ARCH_HAS_CACHE_LINE_SIZE
def_bool y

@@ -1157,8 +1155,8 @@ config XEN

config FORCE_MAX_ZONEORDER
int
default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE)
default "14" if ARM64_64K_PAGES
default "12" if ARM64_16K_PAGES
default "11"
help
The kernel memory allocator divides physically contiguous memory
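[Editor's note] A quick, hedged sanity check on those defaults (hypothetical arithmetic, not from the patch): the largest buddy allocation is (1 << (MAX_ORDER - 1)) pages, so order 14 with 64K pages still covers a 512 MiB contiguous block::

    #include <stdio.h>

    int main(void)
    {
            unsigned long order = 14, page_size = 64UL << 10; /* 64K pages */
            unsigned long max_bytes = (1UL << (order - 1)) * page_size;

            printf("max contiguous: %lu MiB\n", max_bytes >> 20); /* 512 MiB */
            return 0;
    }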
@@ -1855,12 +1853,6 @@ config CMDLINE_FROM_BOOTLOADER
the boot loader doesn't provide any, the default kernel command
string provided in CMDLINE will be used.

config CMDLINE_EXTEND
bool "Extend bootloader kernel arguments"
help
The command-line arguments provided by the boot loader will be
appended to the default kernel command string.

config CMDLINE_FORCE
bool "Always use the default kernel command string"
help
@@ -47,10 +47,10 @@
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid 5
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context 5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2 8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config 8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr 9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr 10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs 11

@@ -183,16 +183,16 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
@@ -83,6 +83,11 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
#endif

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);

@@ -97,7 +102,8 @@ bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);

void __noreturn hyp_panic(void);
#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
u64 elr, u64 par);
#endif

#endif /* __ARM64_KVM_HYP_H__ */
@@ -328,6 +328,11 @@ static inline void *phys_to_virt(phys_addr_t x)
#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)

#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
#define page_to_virt(x) ({ \
__typeof__(x) __page = x; \
void *__addr = __va(page_to_phys(__page)); \
(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})
#define virt_to_page(x) pfn_to_page(virt_to_pfn(x))
#else
#define page_to_virt(x) ({ \
@@ -63,23 +63,6 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;

static inline bool __cpu_uses_extended_idmap(void)
{
if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
return false;

return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}

/*
* True if the extended ID map requires an extra level of translation table
* to be configured.
*/
static inline bool __cpu_uses_extended_idmap_level(void)
{
return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}

/*
* Ensure TCR.T0SZ is set to the provided value.
*/
@@ -66,7 +66,6 @@ extern bool arm64_use_ng_mappings;
#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

#define PAGE_KERNEL __pgprot(PROT_NORMAL)
#define PAGE_KERNEL_TAGGED __pgprot(PROT_NORMAL_TAGGED)
#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
@@ -486,6 +486,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp pgprot_tagged
/*
* DMA allocations for non-coherent devices use what the Arm architecture calls
* "Normal non-cacheable" memory, which permits speculation, unaligned accesses
@@ -796,6 +796,11 @@
#define ID_AA64MMFR0_PARANGE_48 0x5
#define ID_AA64MMFR0_PARANGE_52 0x6

#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2
#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7

#ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
#else

@@ -962,13 +967,16 @@

#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN16_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0xF
#elif defined(CONFIG_ARM64_64K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED
#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
#endif

#define MVFR2_FPMISC_SHIFT 4
@@ -319,7 +319,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
*/
adrp x5, __idmap_text_end
clz x5, x5
cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
b.ge 1f // .. then skip VA range extension

adr_l x6, idmap_t0sz

@@ -655,8 +655,10 @@ SYM_FUNC_END(__secondary_too_slow)
SYM_FUNC_START(__enable_mmu)
mrs x2, ID_AA64MMFR0_EL1
ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
b.ne __no_granule_support
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
b.lt __no_granule_support
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
b.gt __no_granule_support
update_early_cpu_boot_status 0, x2, x3
adrp x2, idmap_pg_dir
phys_to_ttbr x1, x1
@@ -163,33 +163,36 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
} while (1);
}

static __init void parse_cmdline(void)
static __init const u8 *get_bootargs_cmdline(void)
{
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
const u8 *prop;
void *fdt;
int node;

fdt = get_early_fdt_ptr();
if (!fdt)
goto out;
return NULL;

node = fdt_path_offset(fdt, "/chosen");
if (node < 0)
goto out;
return NULL;

prop = fdt_getprop(fdt, node, "bootargs", NULL);
if (!prop)
goto out;
return NULL;

__parse_cmdline(prop, true);
return strlen(prop) ? prop : NULL;
}

if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND))
return;
}
static __init void parse_cmdline(void)
{
const u8 *prop = get_bootargs_cmdline();

out:
if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop)
__parse_cmdline(CONFIG_CMDLINE, true);

if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop)
__parse_cmdline(prop, true);
}

/* Keep checkers quiet */
@@ -101,6 +101,9 @@ KVM_NVHE_ALIAS(__stop___kvm_ex_table);
/* Array containing bases of nVHE per-CPU memory regions. */
KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);

/* PMU available static key */
KVM_NVHE_ALIAS(kvm_arm_pmu_available);

#endif /* CONFIG_KVM */

#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
@@ -460,7 +460,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline u32 armv8pmu_read_evcntr(int idx)
static inline u64 armv8pmu_read_evcntr(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
@@ -385,11 +385,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

/*
* We guarantee that both TLBs and I-cache are private to each
* vcpu. If detecting that a vcpu from the same VM has
* previously run on the same physical CPU, call into the
* hypervisor code to nuke the relevant contexts.
*
* We might get preempted before the vCPU actually runs, but
* over-invalidation doesn't affect correctness.
*/
if (*last_ran != vcpu->vcpu_id) {
kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
kvm_call_hyp(__kvm_flush_cpu_context, mmu);
*last_ran = vcpu->vcpu_id;
}
@@ -85,8 +85,10 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)

// If the hyp context is loaded, go straight to hyp_panic
get_loaded_vcpu x0, x1
cbz x0, hyp_panic
cbnz x0, 1f
b hyp_panic

1:
// The hyp context is saved so make sure it is restored to allow
// hyp_panic to run at hyp and, subsequently, panic to run in the host.
// This makes use of __guest_exit to avoid duplication but sets the

@@ -94,7 +96,7 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
// current state is saved to the guest context but it will only be
// accurate if the guest had been completely restored.
adr_this_cpu x0, kvm_hyp_ctxt, x1
adr x1, hyp_panic
adr_l x1, hyp_panic
str x1, [x0, #CPU_XREG_OFFSET(30)]

get_vcpu_ptr x1, x0

@@ -146,7 +148,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// Now restore the hyp regs
restore_callee_saved_regs x2

set_loaded_vcpu xzr, x1, x2
set_loaded_vcpu xzr, x2, x3

alternative_if ARM64_HAS_RAS_EXTN
// If we have the RAS extensions we can consume a pending error
@@ -90,14 +90,17 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
* counter, which could make a PMXEVCNTR_EL0 access UNDEF at
* EL1 instead of being trapped to EL2.
*/
if (kvm_arm_support_pmu_v3()) {
write_sysreg(0, pmselr_el0);
write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
}
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

static inline void __deactivate_traps_common(void)
{
write_sysreg(0, hstr_el2);
if (kvm_arm_support_pmu_v3())
write_sysreg(0, pmuserenr_el0);
}
@@ -58,16 +58,24 @@ static void __debug_restore_spe(u64 pmscr_el1)
write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}

void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
/* Disable and flush SPE data generation */
__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
}

void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
{
__debug_switch_to_guest_common(vcpu);
}

void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
}

void __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
__debug_switch_to_host_common(vcpu);
}
@@ -71,7 +71,8 @@ SYM_FUNC_START(__host_enter)
SYM_FUNC_END(__host_enter)

/*
* void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
* void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
* u64 elr, u64 par);
*/
SYM_FUNC_START(__hyp_do_panic)
/* Prepare and exit to the host's panic funciton. */

@@ -82,9 +83,11 @@ SYM_FUNC_START(__hyp_do_panic)
hyp_kimg_va lr, x6
msr elr_el2, lr

/* Set the panic format string. Use the, now free, LR as scratch. */
ldr lr, =__hyp_panic_string
hyp_kimg_va lr, x6
mov x29, x0

/* Load the format string into x0 and arguments into x1-7 */
ldr x0, =__hyp_panic_string
hyp_kimg_va x0, x6

/* Load the format arguments into x1-7. */
mov x6, x3

@@ -94,9 +97,7 @@ SYM_FUNC_START(__hyp_do_panic)
mrs x5, hpfar_el2

/* Enter the host, conditionally restoring the host context. */
cmp x0, xzr
mov x0, lr
b.eq __host_enter_without_restoring
cbz x29, __host_enter_without_restoring
b __host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)
@@ -46,11 +46,11 @@ static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}

static void handle___kvm_tlb_flush_local_vmid(struct kvm_cpu_context *host_ctxt)
static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
__kvm_flush_cpu_context(kern_hyp_va(mmu));
}

static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)

@@ -67,9 +67,9 @@ static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
write_sysreg_el2(tmp, SYS_SCTLR);
}

static void handle___vgic_v3_get_ich_vtr_el2(struct kvm_cpu_context *host_ctxt)
static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
cpu_reg(host_ctxt, 1) = __vgic_v3_get_ich_vtr_el2();
cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}

static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)

@@ -115,10 +115,10 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
HANDLE_FUNC(__kvm_tlb_flush_local_vmid),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__kvm_timer_set_cntvoff),
HANDLE_FUNC(__kvm_enable_ssbs),
HANDLE_FUNC(__vgic_v3_get_ich_vtr_el2),
HANDLE_FUNC(__vgic_v3_get_gic_config),
HANDLE_FUNC(__vgic_v3_read_vmcr),
HANDLE_FUNC(__vgic_v3_write_vmcr),
HANDLE_FUNC(__vgic_v3_init_lrs),
@@ -192,6 +192,14 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

__sysreg_save_state_nvhe(host_ctxt);
/*
* We must flush and disable the SPE buffer for nVHE, as
* the translation regime(EL1&0) is going to be loaded with
* that of the guest. And we must do this before we change the
* translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
* before we load guest Stage1.
*/
__debug_save_host_buffers_nvhe(vcpu);

__adjust_pc(vcpu);

@@ -234,11 +242,12 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
__fpsimd_save_fpexc32(vcpu);

__debug_switch_to_host(vcpu);
/*
* This must come after restoring the host sysregs, since a non-VHE
* system may enable SPE here and make use of the TTBRs.
*/
__debug_switch_to_host(vcpu);
__debug_restore_host_buffers_nvhe(vcpu);

if (pmu_switch_needed)
__pmu_switch_to_host(host_ctxt);

@@ -257,7 +266,6 @@ void __noreturn hyp_panic(void)
u64 spsr = read_sysreg_el2(SYS_SPSR);
u64 elr = read_sysreg_el2(SYS_ELR);
u64 par = read_sysreg_par();
bool restore_host = true;
struct kvm_cpu_context *host_ctxt;
struct kvm_vcpu *vcpu;

@@ -271,7 +279,7 @@ void __noreturn hyp_panic(void)
__sysreg_restore_state_nvhe(host_ctxt);
}

__hyp_do_panic(restore_host, spsr, elr, par);
__hyp_do_panic(host_ctxt, spsr, elr, par);
unreachable();
}
@@ -123,7 +123,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;

@@ -131,6 +131,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_guest(mmu, &cxt);

__tlbi(vmalle1);
asm volatile("ic iallu");
dsb(nsh);
isb();
@@ -223,6 +223,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
goto out;

if (!table) {
data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
data->addr += kvm_granule_size(level);
goto out;
}
@@ -405,9 +405,45 @@ void __vgic_v3_init_lrs(void)
__gic_v3_set_lr(0, i);
}

u64 __vgic_v3_get_ich_vtr_el2(void)
/*
* Return the GIC CPU configuration:
* - [31:0] ICH_VTR_EL2
* - [62:32] RES0
* - [63] MMIO (GICv2) capable
*/
u64 __vgic_v3_get_gic_config(void)
{
return read_gicreg(ICH_VTR_EL2);
u64 val, sre = read_gicreg(ICC_SRE_EL1);
unsigned long flags = 0;

/*
* To check whether we have a MMIO-based (GICv2 compatible)
* CPU interface, we need to disable the system register
* view. To do that safely, we have to prevent any interrupt
* from firing (which would be deadly).
*
* Note that this only makes sense on VHE, as interrupts are
* already masked for nVHE as part of the exception entry to
* EL2.
*/
if (has_vhe())
flags = local_daif_save();

write_gicreg(0, ICC_SRE_EL1);
isb();

val = read_gicreg(ICC_SRE_EL1);

write_gicreg(sre, ICC_SRE_EL1);
isb();

if (has_vhe())
local_daif_restore(flags);

val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
val |= read_gicreg(ICH_VTR_EL2);

return val;
}

u64 __vgic_v3_read_vmcr(void)
@@ -127,7 +127,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;

@@ -135,6 +135,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_guest(mmu, &cxt);

__tlbi(vmalle1);
asm volatile("ic iallu");
dsb(nsh);
isb();
@@ -1312,8 +1312,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* Prevent userspace from creating a memory region outside of the IPA
* space addressable by the KVM guest IPA space.
*/
if (memslot->base_gfn + memslot->npages >=
(kvm_phys_size(kvm) >> PAGE_SHIFT))
if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;

mmap_read_lock(current->mm);
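[Editor's note] A hedged worked example (hypothetical numbers, not from the patch) of the off-by-one the hunk above fixes: a slot that ends exactly at the IPA boundary is legal, so the old ``>=`` wrongly rejected it::

    #include <stdbool.h>

    /* The fixed test: the slot may extend up to, and including, the limit. */
    static bool slot_fits(unsigned long base_gfn, unsigned long npages,
                          unsigned long ipa_pages)
    {
            return (base_gfn + npages) <= ipa_pages;
    }

    /* With a 40-bit IPA and 4K pages, ipa_pages = 1UL << 28.
     * slot_fits(ipa_pages - 16, 16, ipa_pages) is true: the slot just fits.
     * The old '>=' comparison returned -EFAULT for this valid layout. */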
@@ -11,6 +11,8 @@

#include <asm/kvm_emulate.h>

DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static int kvm_is_in_guest(void)
{
return kvm_get_running_vcpu() != NULL;

@@ -48,6 +50,14 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {

int kvm_perf_init(void)
{
/*
* Check if HW_PERF_EVENTS are supported by checking the number of
* hardware performance counters. This could ensure the presence of
* a physical PMU and CONFIG_PERF_EVENT is selected.
*/
if (IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0)
static_branch_enable(&kvm_arm_pmu_available);

return perf_register_guest_info_callbacks(&kvm_guest_cbs);
}
@@ -823,16 +823,6 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
return val & mask;
}

bool kvm_arm_support_pmu_v3(void)
{
/*
* Check if HW_PERF_EVENTS are supported by checking the number of
* hardware performance counters. This could ensure the presence of
* a physical PMU and CONFIG_PERF_EVENT is selected.
*/
return (perf_num_counters() > 0);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
if (!kvm_vcpu_has_pmu(vcpu))
@@ -311,23 +311,24 @@ int kvm_set_ipa_limit(void)
}

switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
default:
case 1:
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
return -EINVAL;
case 0:
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
break;
case 2:
case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
break;
default:
kvm_err("Unsupported value for TGRAN_2, giving up\n");
return -EINVAL;
}

kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
WARN(kvm_ipa_limit < KVM_PHYS_SHIFT,
"KVM IPA Size Limit (%d bits) is smaller than default size\n",
kvm_ipa_limit);
kvm_info("IPA Size Limit: %d bits\n", kvm_ipa_limit);
kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
" (Reduced IPA size, limited VM/VMM compatibility)" : ""));

return 0;
}

@@ -356,6 +357,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
return -EINVAL;
} else {
phys_shift = KVM_PHYS_SHIFT;
if (phys_shift > kvm_ipa_limit) {
pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
current->comm);
return -EINVAL;
}
}

mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
@@ -574,9 +574,13 @@ early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
*/
int vgic_v3_probe(const struct gic_kvm_info *info)
{
u32 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
bool has_v2;
int ret;

has_v2 = ich_vtr_el2 >> 63;
ich_vtr_el2 = (u32)ich_vtr_el2;

/*
* The ListRegs field is 5 bits, but there is an architectural
* maximum of 16 list registers. Just ignore bit 4...

@@ -594,13 +598,15 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
gicv4_enable ? "en" : "dis");
}

kvm_vgic_global_state.vcpu_base = 0;

if (!info->vcpu.start) {
kvm_info("GICv3: no GICV resource entry\n");
kvm_vgic_global_state.vcpu_base = 0;
} else if (!has_v2) {
pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
} else if (!PAGE_ALIGNED(info->vcpu.start)) {
pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)info->vcpu.start);
kvm_vgic_global_state.vcpu_base = 0;
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true;
@@ -219,17 +219,40 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)

int pfn_valid(unsigned long pfn)
{
phys_addr_t addr = pfn << PAGE_SHIFT;
phys_addr_t addr = PFN_PHYS(pfn);

if ((addr >> PAGE_SHIFT) != pfn)
/*
* Ensure the upper PAGE_SHIFT bits are clear in the
* pfn. Else it might lead to false positives when
* some of the upper bits are set, but the lower bits
* match a valid pfn.
*/
if (PHYS_PFN(addr) != pfn)
return 0;

#ifdef CONFIG_SPARSEMEM
{
struct mem_section *ms;

if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;

if (!valid_section(__pfn_to_section(pfn)))
ms = __pfn_to_section(pfn);
if (!valid_section(ms))
return 0;

/*
* ZONE_DEVICE memory does not have the memblock entries.
* memblock_is_map_memory() check for ZONE_DEVICE based
* addresses will always fail. Even the normal hotplugged
* memory will never have MEMBLOCK_NOMAP flag set in their
* memblock entries. Skip memblock search for all non early
* memory sections covering all of hotplug memory including
* both normal and ZONE_DEVICE based.
*/
if (!early_section(ms))
return pfn_section_valid(ms, pfn);
}
#endif
return memblock_is_map_memory(addr);
}
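[Editor's note] The "false positive" the new comment describes is easy to demonstrate; a hedged stand-alone sketch (userspace C, made-up values) of why the round-trip compare is needed::

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint64_t pfn = (1ULL << 52) | 0x1234; /* bogus upper bits set */
            uint64_t addr = pfn << PAGE_SHIFT;    /* upper bits shift out */

            /* addr now aliases the valid pfn 0x1234; the round-trip
             * compare catches the mismatch and rejects the bogus pfn. */
            printf("%d\n", (addr >> PAGE_SHIFT) == pfn); /* prints 0 */
            return 0;
    }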
@@ -40,7 +40,7 @@
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 __section(".mmuoff.data.write") vabits_actual;

@@ -512,7 +512,8 @@ static void __init map_mem(pgd_t *pgdp)
* if MTE is present. Otherwise, it has the same attributes as
* PAGE_KERNEL.
*/
__map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
flags);
}

/*
@@ -32,7 +32,7 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
return regs->r10 == -1 ? regs->r8:0;
return regs->r10 == -1 ? -regs->r8:0;
}

static inline long syscall_get_return_value(struct task_struct *task,
@@ -2013,27 +2013,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
struct syscall_get_set_args *args = data;
struct pt_regs *pt = args->regs;
unsigned long *krbs, cfm, ndirty;
unsigned long *krbs, cfm, ndirty, nlocals, nouts;
int i, count;

if (unw_unwind_to_user(info) < 0)
return;

/*
* We get here via a few paths:
* - break instruction: cfm is shared with caller.
* syscall args are in out= regs, locals are non-empty.
* - epsinstruction: cfm is set by br.call
* locals don't exist.
*
* For both cases argguments are reachable in cfm.sof - cfm.sol.
* CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
*/
cfm = pt->cr_ifs;
nlocals = (cfm >> 7) & 0x7f; /* aka sol */
nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

count = 0;
if (in_syscall(pt))
count = min_t(int, args->n, cfm & 0x7f);
count = min_t(int, args->n, nouts);

/* Iterate over outs. */
for (i = 0; i < count; i++) {
int j = ndirty + nlocals + i + args->i;
if (args->rw)
*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
args->args[i];
*ia64_rse_skip_regs(krbs, j) = args->args[i];
else
args->args[i] = *ia64_rse_skip_regs(krbs,
ndirty + i + args->i);
args->args[i] = *ia64_rse_skip_regs(krbs, j);
}

if (!args->rw) {
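[Editor's note] A hedged worked example of the CFM decode in the comment above, using a made-up register value::

    /* CFM: [ ... | sor: 17..14 | sol: 13..7 | sof: 6..0 ] */
    unsigned long cfm = 0x28a;                    /* sof = 10, sol = 5  */
    unsigned long nlocals = (cfm >> 7) & 0x7f;    /* 5 local registers  */
    unsigned long nouts = (cfm & 0x7f) - nlocals; /* 10 - 5 = 5 outs    */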
@@ -171,7 +171,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
#include <asm-generic/memory_model.h>
#endif

#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
#define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))

#endif /* __ASSEMBLY__ */
@@ -30,8 +30,8 @@ extern unsigned long memory_end;
#define page_to_pfn(page) virt_to_pfn(page_to_virt(page))
#define pfn_valid(pfn) ((pfn) < max_mapnr)

#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
((void *)(kaddr) < (void *)memory_end))
#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET) && \
((unsigned long)(kaddr) < memory_end))

#endif /* __ASSEMBLY__ */
@@ -14,6 +14,7 @@

#include <asm/addrspace.h>
#include <asm/unaligned.h>
#include <asm-generic/vmlinux.lds.h>

/*
* These two variables specify the free mem region

@@ -120,6 +121,13 @@ void decompress_kernel(unsigned long boot_heap_start)
/* last four bytes is always image size in little endian */
image_size = get_unaligned_le32((void *)&__image_end - 4);

/* The device tree's address must be properly aligned */
image_size = ALIGN(image_size, STRUCT_ALIGNMENT);

puts("Copy device tree to address ");
puthex(VMLINUX_LOAD_ADDRESS_ULL + image_size);
puts("\n");

/* copy dtb to where the booted kernel will expect it */
memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,
__appended_dtb, dtb_size);
@@ -12,8 +12,8 @@ AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o
poly1305-mips-y := poly1305-core.o poly1305-glue.o

perlasm-flavour-$(CONFIG_CPU_MIPS32) := o32
perlasm-flavour-$(CONFIG_CPU_MIPS64) := 64
perlasm-flavour-$(CONFIG_32BIT) := o32
perlasm-flavour-$(CONFIG_64BIT) := 64

quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@)
@@ -24,8 +24,11 @@ extern void (*board_ebase_setup)(void);
extern void (*board_cache_error_setup)(void);

extern int register_nmi_notifier(struct notifier_block *nb);
extern void reserve_exception_space(phys_addr_t addr, unsigned long size);
extern char except_vec_nmi[];

#define VECTORSPACING 0x100 /* for EI/VI mode */

#define nmi_notifier(fn, pri) \
({ \
static struct notifier_block fn##_nb = { \
@@ -26,6 +26,7 @@
#include <asm/elf.h>
#include <asm/pgtable-bits.h>
#include <asm/spram.h>
#include <asm/traps.h>
#include <linux/uaccess.h>

#include "fpu-probe.h"

@@ -1628,6 +1629,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_BMIPS3300;
__cpu_name[cpu] = "Broadcom BMIPS3300";
set_elf_platform(cpu, "bmips3300");
reserve_exception_space(0x400, VECTORSPACING * 64);
break;
case PRID_IMP_BMIPS43XX: {
int rev = c->processor_id & PRID_REV_MASK;

@@ -1638,6 +1640,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "Broadcom BMIPS4380";
set_elf_platform(cpu, "bmips4380");
c->options |= MIPS_CPU_RIXI;
reserve_exception_space(0x400, VECTORSPACING * 64);
} else {
c->cputype = CPU_BMIPS4350;
__cpu_name[cpu] = "Broadcom BMIPS4350";

@@ -1654,6 +1657,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "Broadcom BMIPS5000";
set_elf_platform(cpu, "bmips5000");
c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI;
reserve_exception_space(0x1000, VECTORSPACING * 64);
break;
}
}

@@ -2133,6 +2137,8 @@ void cpu_probe(void)
if (cpu == 0)
__ua_limit = ~((1ull << cpu_vmbits) - 1);
#endif

reserve_exception_space(0, 0x1000);
}

void cpu_report(void)
@@ -21,6 +21,7 @@
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/elf.h>
#include <asm/traps.h>

#include "fpu-probe.h"

@@ -158,6 +159,8 @@ void cpu_probe(void)
cpu_set_fpu_opts(c);
else
cpu_set_nofpu_opts(c);

reserve_exception_space(0, 0x400);
}

void cpu_report(void)
@@ -2009,13 +2009,16 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs)
nmi_exit();
}

#define VECTORSPACING 0x100 /* for EI/VI mode */

unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void reserve_exception_space(phys_addr_t addr, unsigned long size)
{
memblock_reserve(addr, size);
}

void __init *set_except_vector(int n, void *addr)
{
unsigned long handler = (unsigned long) addr;

@@ -2367,10 +2370,7 @@ void __init trap_init(void)

if (!cpu_has_mips_r2_r6) {
ebase = CAC_BASE;
ebase_pa = virt_to_phys((void *)ebase);
vec_size = 0x400;

memblock_reserve(ebase_pa, vec_size);
} else {
if (cpu_has_veic || cpu_has_vint)
vec_size = 0x200 + VECTORSPACING*64;
@@ -145,6 +145,7 @@ SECTIONS
}

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
STRUCT_ALIGN();
.appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
*(.appended_dtb)
KEEP(*(.appended_dtb))

@@ -172,6 +173,11 @@ SECTIONS
#endif

#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
.fill : {
FILL(0);
BYTE(0);
. = ALIGN(8);
}
__appended_dtb = .;
/* leave space for appended DTB */
. += 0x100000;
@@ -203,9 +203,12 @@ config PREFETCH
def_bool y
depends on PA8X00 || PA7200

config PARISC_HUGE_KERNEL
def_bool y if !MODULES || UBSAN || FTRACE || COMPILE_TEST

config MLONGCALLS
def_bool y if !MODULES || UBSAN || FTRACE
bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
def_bool y if PARISC_HUGE_KERNEL
bool "Enable the -mlong-calls compiler option for big kernels" if !PARISC_HUGE_KERNEL
depends on PA8X00
help
If you configure the kernel to include many drivers built-in instead
@@ -567,8 +567,6 @@ static const struct user_regset_view user_parisc_native_view = {
};

#ifdef CONFIG_64BIT
#include <linux/compat.h>

static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
@@ -73,9 +73,10 @@ void __patch_exception(int exc, unsigned long addr);
#endif

#define OP_RT_RA_MASK 0xffff0000UL
#define LIS_R2 0x3c020000UL
#define ADDIS_R2_R12 0x3c4c0000UL
#define ADDI_R2_R2 0x38420000UL
#define LIS_R2 (PPC_INST_ADDIS | __PPC_RT(R2))
#define ADDIS_R2_R12 (PPC_INST_ADDIS | __PPC_RT(R2) | __PPC_RA(R12))
#define ADDI_R2_R2 (PPC_INST_ADDI | __PPC_RT(R2) | __PPC_RA(R2))


static inline unsigned long ppc_function_entry(void *func)
{
@@ -53,8 +53,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
#define mfdcr(rn) \
({unsigned int rval; \
if (__builtin_constant_p(rn) && rn < 1024) \
asm volatile("mfdcr %0," __stringify(rn) \
: "=r" (rval)); \
asm volatile("mfdcr %0, %1" : "=r" (rval) \
: "n" (rn)); \
else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
rval = mfdcrx(rn); \
else \

@@ -64,8 +64,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
#define mtdcr(rn, v) \
do { \
if (__builtin_constant_p(rn) && rn < 1024) \
asm volatile("mtdcr " __stringify(rn) ",%0" \
: : "r" (v)); \
asm volatile("mtdcr %0, %1" \
: : "n" (rn), "r" (v)); \
else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
mtdcrx(rn, v); \
else \
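[Editor's note] The switch from __stringify() to the "n" constraint is worth a note: "n" asks the compiler to substitute a compile-time integer constant directly into the asm template, so the mnemonic no longer has to be pasted together as a string. A minimal hedged sketch, assuming a hypothetical DCR number::

    /* "n" only accepts compile-time constants; the compiler emits the
     * literal 128 straight into the mfdcr instruction. */
    static inline unsigned int example_mfdcr_0x80(void)
    {
            unsigned int rval;

            asm volatile("mfdcr %0, %1" : "=r" (rval) : "n" (0x80));
            return rval;
    }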
@@ -410,7 +410,6 @@ DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER(unrecoverable_exception);
DECLARE_INTERRUPT_HANDLER(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

@@ -437,6 +436,8 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

void unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);
@@ -228,7 +228,7 @@ enum {
#define MMU_FTRS_ALWAYS 0
#endif

static inline bool early_mmu_has_feature(unsigned long feature)
static __always_inline bool early_mmu_has_feature(unsigned long feature)
{
if (MMU_FTRS_ALWAYS & feature)
return true;

@@ -286,7 +286,7 @@ static inline void mmu_feature_keys_init(void)

}

static inline bool mmu_has_feature(unsigned long feature)
static __always_inline bool mmu_has_feature(unsigned long feature)
{
return early_mmu_has_feature(feature);
}
@@ -195,7 +195,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
#define TRAP_FLAGS_MASK 0x11
#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
#define SET_FULL_REGS(regs) ((regs)->trap |= 1)
#define SET_FULL_REGS(regs) ((regs)->trap &= ~1)
#endif
#define CHECK_FULL_REGS(regs) BUG_ON(!FULL_REGS(regs))
#define NV_REG_POISON 0xdeadbeefdeadbeefUL

@@ -210,7 +210,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
#define TRAP_FLAGS_MASK 0x1F
#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
#define SET_FULL_REGS(regs) ((regs)->trap |= 1)
#define SET_FULL_REGS(regs) ((regs)->trap &= ~1)
#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0)
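[Editor's note] The one-bit nature of this fix is easy to miss; a hedged illustration (hypothetical trap value, not from the patch). FULL_REGS() tests that bit 0 is clear, so the old macro set the flag to the opposite of its name::

    static int full_regs(unsigned long trap) { return (trap & 1) == 0; }

    /* trap = 0x700;
     * old SET_FULL_REGS: trap |= 1   -> full_regs(trap) == 0 (wrong)
     * new SET_FULL_REGS: trap &= ~1  -> full_regs(trap) == 1 (correct) */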
@@ -71,6 +71,16 @@ static inline void disable_kernel_vsx(void)
{
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
#else
static inline void enable_kernel_vsx(void)
{
BUILD_BUG();
}

static inline void disable_kernel_vsx(void)
{
BUILD_BUG();
}
#endif

#ifdef CONFIG_SPE
@@ -113,7 +113,7 @@ struct vio_driver {
const char *name;
const struct vio_device_id *id_table;
int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
int (*remove)(struct vio_dev *dev);
void (*remove)(struct vio_dev *dev);
/* A driver must have a get_desired_dma() function to
* be loaded in a CMO environment if it uses DMA.
*/
@ -466,7 +466,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
|
|||
|
||||
ld r10,PACAKMSR(r13) /* get MSR value for kernel */
|
||||
/* MSR[RI] is clear iff using SRR regs */
|
||||
.if IHSRR == EXC_HV_OR_STD
|
||||
.if IHSRR_IF_HVMODE
|
||||
BEGIN_FTR_SECTION
|
||||
xori r10,r10,MSR_RI
|
||||
END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
|
||||
|
|
|
@ -457,11 +457,12 @@ InstructionTLBMiss:
|
|||
cmplw 0,r1,r3
|
||||
#endif
|
||||
mfspr r2, SPRN_SDR1
|
||||
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
|
||||
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
|
||||
rlwinm r2, r2, 28, 0xfffff000
|
||||
#ifdef CONFIG_MODULES
|
||||
bgt- 112f
|
||||
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
|
||||
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
|
||||
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
|
||||
#endif
|
||||
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
|
||||
|
@ -520,10 +521,11 @@ DataLoadTLBMiss:
|
|||
lis r1, TASK_SIZE@h /* check if kernel address */
|
||||
cmplw 0,r1,r3
|
||||
mfspr r2, SPRN_SDR1
|
||||
li r1, _PAGE_PRESENT | _PAGE_ACCESSED
|
||||
li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
|
||||
rlwinm r2, r2, 28, 0xfffff000
|
||||
bgt- 112f
|
||||
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
|
||||
li r1, _PAGE_PRESENT | _PAGE_ACCESSED
|
||||
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
|
||||
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
|
||||
lwz r2,0(r2) /* get pmd entry */
|
||||
|
@ -597,10 +599,11 @@ DataStoreTLBMiss:
|
|||
lis r1, TASK_SIZE@h /* check if kernel address */
|
||||
cmplw 0,r1,r3
|
||||
mfspr r2, SPRN_SDR1
|
||||
li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
|
||||
li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
|
||||
rlwinm r2, r2, 28, 0xfffff000
|
||||
bgt- 112f
|
||||
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
|
||||
li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
|
||||
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
|
||||
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
|
||||
lwz r2,0(r2) /* get pmd entry */
|
||||
|
|
|
@ -149,7 +149,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
|
|||
* enabled when the interrupt handler returns (indicating a process-context /
|
||||
* synchronous interrupt) then irqs_enabled should be true.
|
||||
*/
|
||||
static notrace inline bool __prep_irq_for_enabled_exit(bool clear_ri)
|
||||
static notrace __always_inline bool __prep_irq_for_enabled_exit(bool clear_ri)
|
||||
{
|
||||
/* This must be done with RI=1 because tracing may touch vmaps */
|
||||
trace_hardirqs_on();
|
||||
|
@ -436,7 +436,6 @@ again:
|
|||
return ret;
|
||||
}
|
||||
|
||||
void unrecoverable_exception(struct pt_regs *regs);
|
||||
void preempt_schedule_irq(void);
|
||||
|
||||
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
|
||||
|
|
|
@ -2170,7 +2170,7 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
|
|||
* in the MSR is 0. This indicates that SRR0/1 are live, and that
|
||||
* we therefore lost state by taking this exception.
|
||||
*/
|
||||
DEFINE_INTERRUPT_HANDLER(unrecoverable_exception)
|
||||
void unrecoverable_exception(struct pt_regs *regs)
|
||||
{
|
||||
pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
|
||||
regs->trap, regs->nip, regs->msr);
|
||||
|
|
|
@ -904,7 +904,7 @@ static nokprobe_inline int do_vsx_load(struct instruction_op *op,
	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
		return -EFAULT;

	nr_vsx_regs = size / sizeof(__vector128);
	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
	emulate_vsx_load(op, buf, mem, cross_endian);
	preempt_disable();
	if (reg < 32) {

@ -951,7 +951,7 @@ static nokprobe_inline int do_vsx_store(struct instruction_op *op,
	if (!address_ok(regs, ea, size))
		return -EFAULT;

	nr_vsx_regs = size / sizeof(__vector128);
	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */

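
Why the max(1ul, ...) guard above matters: for an 8-byte VSX access, size / sizeof(__vector128) is 8/16, which truncates to zero, so the register copy loop would run zero times and the emulation would touch no registers at all. An arithmetic-only sketch of the failure and the fix:

	#include <stdio.h>

	int main(void)
	{
		unsigned long size = 8;			/* an 8-byte VSX access */
		unsigned long vec = 16;			/* sizeof(__vector128) */
		unsigned long naive = size / vec;	/* truncates to 0 */
		unsigned long fixed = naive ? naive : 1; /* effect of max(1ul, ...) */

		printf("naive=%lu fixed=%lu\n", naive, fixed);	/* naive=0 fixed=1 */
		return 0;
	}
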
@ -222,7 +222,7 @@ static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *
	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
		*addrp = mfspr(SPRN_SDAR);

	if (is_kernel_addr(mfspr(SPRN_SDAR)) && perf_allow_kernel(&event->attr) != 0)
	if (is_kernel_addr(mfspr(SPRN_SDAR)) && event->attr.exclude_kernel)
		*addrp = 0;
}

@ -507,7 +507,7 @@ static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *
		 * addresses, hence include a check before filtering code
		 */
		if (!(ppmu->flags & PPMU_ARCH_31) &&
			is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0)
			is_kernel_addr(addr) && event->attr.exclude_kernel)
			continue;

		/* Branches are read most recent first (ie. mfbhrb 0 is

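
The two hunks above swap a global permission check (perf_allow_kernel(), driven by the perf_event_paranoid sysctl) for the per-event attr.exclude_kernel bit, so a sampled address is scrubbed only when this particular event asked not to see kernel addresses. A toy model of the new filter, with hypothetical types rather than the real perf API:

	#include <stdio.h>

	struct toy_attr {
		unsigned int exclude_kernel : 1;	/* models perf_event_attr */
	};

	/* Return 0 to scrub a kernel address from the sample. */
	static int keep_addr(int addr_is_kernel, const struct toy_attr *attr)
	{
		if (addr_is_kernel && attr->exclude_kernel)
			return 0;
		return 1;
	}

	int main(void)
	{
		struct toy_attr user_only = { .exclude_kernel = 1 };

		printf("%d %d\n", keep_addr(1, &user_only), keep_addr(0, &user_only));
		return 0;
	}
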
@ -4,6 +4,7 @@
 * Copyright 2006-2007 Michael Ellerman, IBM Corp.
 */

#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/msi.h>

@ -458,6 +459,26 @@ again:
			return hwirq;
		}

		/*
		 * Depending on the number of online CPUs in the original
		 * kernel, it is likely for CPU #0 to be offline in a kdump
		 * kernel. The associated IRQs in the affinity mappings
		 * provided by irq_create_affinity_masks() are thus not
		 * started by irq_startup(), as per-design for managed IRQs.
		 * This can be a problem with multi-queue block devices driven
		 * by blk-mq : such a non-started IRQ is very likely paired
		 * with the single queue enforced by blk-mq during kdump (see
		 * blk_mq_alloc_tag_set()). This causes the device to remain
		 * silent and likely hangs the guest at some point.
		 *
		 * We don't really care for fine-grained affinity when doing
		 * kdump actually : simply ignore the pre-computed affinity
		 * masks in this case and let the default mask with all CPUs
		 * be used when creating the IRQ mappings.
		 */
		if (is_kdump_kernel())
			virq = irq_create_mapping(NULL, hwirq);
		else
			virq = irq_create_mapping_affinity(NULL, hwirq,
							   entry->affinity);

@ -1261,7 +1261,6 @@ static int vio_bus_remove(struct device *dev)
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	struct device *devptr;
	int ret = 1;

	/*
	 * Hold a reference to the device after the remove function is called

@ -1270,13 +1269,13 @@ static int vio_bus_remove(struct device *dev)
	devptr = get_device(dev);

	if (viodrv->remove)
		ret = viodrv->remove(viodev);
		viodrv->remove(viodev);

	if (!ret && firmware_has_feature(FW_FEATURE_CMO))
	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_remove(viodev);

	put_device(devptr);
	return ret;
	return 0;
}

/**

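
A note on the vio_bus_remove() rework above: the driver core ignores what a bus's remove path returns anyway, so giving the driver's ->remove() a void signature removes the temptation to report an error nobody acts on, and the CMO accounting now runs unconditionally instead of only on "success". A toy bus in plain C showing the shape (all names hypothetical, not the vio API):

	#include <stdio.h>

	struct toy_dev { const char *name; };

	struct toy_driver {
		void (*remove)(struct toy_dev *dev);	/* was int (*remove)(...) */
	};

	static void toy_remove(struct toy_dev *dev)
	{
		printf("removing %s\n", dev->name);
	}

	static int toy_bus_remove(struct toy_dev *dev, struct toy_driver *drv)
	{
		if (drv->remove)
			drv->remove(dev);
		/* accounting that used to be skipped on "failure" now always runs */
		return 0;
	}

	int main(void)
	{
		struct toy_dev d = { "vio0" };
		struct toy_driver drv = { .remove = toy_remove };

		return toy_bus_remove(&d, &drv);
	}
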
@ -275,9 +275,9 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_TWOS=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_IP_NF_IPTABLES=m

@ -298,7 +298,6 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_TABLES_IPV6=y
CONFIG_NFT_FIB_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m

@ -481,7 +480,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CADENCE is not set

@ -581,7 +579,6 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y

@ -635,6 +632,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m

@ -714,12 +712,8 @@ CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m

@ -731,7 +725,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m

@ -796,12 +789,9 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_VMACACHE=y
CONFIG_DEBUG_VM_RB=y
CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m

@ -838,6 +828,7 @@ CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_FTRACE_STARTUP_TEST=y
# CONFIG_EVENT_TRACE_STARTUP_TEST is not set
CONFIG_DEBUG_ENTRY=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y

@ -861,4 +852,3 @@ CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
CONFIG_DEBUG_ENTRY=y

@ -266,9 +266,9 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_TWOS=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_IP_NF_IPTABLES=m

@ -289,7 +289,6 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_TABLES_IPV6=y
CONFIG_NFT_FIB_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m

@ -473,7 +472,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CADENCE is not set

@ -573,7 +571,6 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y

@ -623,6 +620,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m

@ -703,12 +701,8 @@ CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m

@ -720,7 +714,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m

@ -26,7 +26,6 @@ CONFIG_CRASH_DUMP=y
# CONFIG_SECCOMP is not set
# CONFIG_GCC_PLUGINS is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set

@ -61,11 +60,9 @@ CONFIG_RAW_DRIVER=y
# CONFIG_HID is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set
# CONFIG_SURFACE_PLATFORMS is not set
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"

@ -14,12 +14,12 @@

struct s390_idle_data {
	seqcount_t seqcount;
	unsigned long long idle_count;
	unsigned long long idle_time;
	unsigned long long clock_idle_enter;
	unsigned long long clock_idle_exit;
	unsigned long long timer_idle_enter;
	unsigned long long timer_idle_exit;
	unsigned long idle_count;
	unsigned long idle_time;
	unsigned long clock_idle_enter;
	unsigned long clock_idle_exit;
	unsigned long timer_idle_enter;
	unsigned long timer_idle_exit;
	unsigned long mt_cycles_enter[8];
};

@ -98,10 +98,10 @@ extern unsigned char ptff_function_mask[16];

/* Query TOD offset result */
struct ptff_qto {
	unsigned long long physical_clock;
	unsigned long long tod_offset;
	unsigned long long logical_tod_offset;
	unsigned long long tod_epoch_difference;
	unsigned long physical_clock;
	unsigned long tod_offset;
	unsigned long logical_tod_offset;
	unsigned long tod_epoch_difference;
} __packed;

static inline int ptff_query(unsigned int nr)

@ -151,9 +151,9 @@ struct ptff_qui {
	rc;						\
})

static inline unsigned long long local_tick_disable(void)
static inline unsigned long local_tick_disable(void)
{
	unsigned long long old;
	unsigned long old;

	old = S390_lowcore.clock_comparator;
	S390_lowcore.clock_comparator = clock_comparator_max;

@ -161,7 +161,7 @@ static inline unsigned long long local_tick_disable(void)
	return old;
}

static inline void local_tick_enable(unsigned long long comp)
static inline void local_tick_enable(unsigned long comp)
{
	S390_lowcore.clock_comparator = comp;
	set_clock_comparator(S390_lowcore.clock_comparator);

@ -169,9 +169,9 @@ static inline void local_tick_enable(unsigned long long comp)

#define CLOCK_TICK_RATE		1193180	/* Underlying HZ */

typedef unsigned long long cycles_t;
typedef unsigned long cycles_t;

static inline unsigned long long get_tod_clock(void)
static inline unsigned long get_tod_clock(void)
{
	union tod_clock clk;

@ -179,10 +179,10 @@ static inline unsigned long long get_tod_clock(void)
	return clk.tod;
}

static inline unsigned long long get_tod_clock_fast(void)
static inline unsigned long get_tod_clock_fast(void)
{
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	unsigned long long clk;
	unsigned long clk;

	asm volatile("stckf %0" : "=Q" (clk) : : "cc");
	return clk;

@ -208,9 +208,9 @@ extern union tod_clock tod_clock_base;
 * Therefore preemption must be disabled, otherwise the returned
 * value is not guaranteed to be monotonic.
 */
static inline unsigned long long get_tod_clock_monotonic(void)
static inline unsigned long get_tod_clock_monotonic(void)
{
	unsigned long long tod;
	unsigned long tod;

	preempt_disable_notrace();
	tod = get_tod_clock() - tod_clock_base.tod;

@ -237,7 +237,7 @@ static inline unsigned long long get_tod_clock_monotonic(void)
 * -> ns = (th * 125) + ((tl * 125) >> 9);
 *
 */
static inline unsigned long long tod_to_ns(unsigned long long todval)
static inline unsigned long tod_to_ns(unsigned long todval)
{
	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

@ -249,10 +249,10 @@ static inline unsigned long long tod_to_ns(unsigned long long todval)
 *
 * Returns: true if a is later than b
 */
static inline int tod_after(unsigned long long a, unsigned long long b)
static inline int tod_after(unsigned long a, unsigned long b)
{
	if (MACHINE_HAS_SCC)
		return (long long) a > (long long) b;
		return (long) a > (long) b;
	return a > b;
}

@ -263,10 +263,10 @@ static inline int tod_after(unsigned long long a, unsigned long long b)
 *
 * Returns: true if a is later than b
 */
static inline int tod_after_eq(unsigned long long a, unsigned long long b)
static inline int tod_after_eq(unsigned long a, unsigned long b)
{
	if (MACHINE_HAS_SCC)
		return (long long) a >= (long long) b;
		return (long) a >= (long) b;
	return a >= b;
}

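
Context for the long run of unsigned long long to unsigned long conversions above: s390 has been a 64-bit-only architecture since the 31-bit target was removed, so both types are 64 bits wide and the shorter one is simply the native word. The tod_to_ns() arithmetic itself is unchanged; a standalone check that the split multiply still converts one second of TOD ticks (4096 ticks per microsecond) to exactly one billion nanoseconds:

	#include <stdio.h>

	/* Same formula as the kernel's tod_to_ns(): ns = (tod * 1000) / 4096,
	 * computed as (tod * 125) >> 9, split into high and low halves so the
	 * multiplication cannot overflow 64 bits. */
	static unsigned long tod_to_ns(unsigned long todval)
	{
		return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
	}

	int main(void)
	{
		unsigned long one_second = 4096000000UL;	/* TOD ticks per second */

		printf("%lu\n", tod_to_ns(one_second));		/* prints 1000000000 */
		return 0;
	}
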
@ -47,7 +47,7 @@ void account_idle_time_irq(void)
void arch_cpu_idle(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long long idle_time;
	unsigned long idle_time;
	unsigned long psw_mask;

	/* Wait for external, I/O or machine check interrupt. */

@ -73,7 +73,7 @@ static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned long idle_count;
	unsigned int seq;

	do {

@ -82,14 +82,14 @@ static ssize_t show_idle_count(struct device *dev,
		if (READ_ONCE(idle->clock_idle_enter))
			idle_count++;
	} while (read_seqcount_retry(&idle->seqcount, seq));
	return sprintf(buf, "%llu\n", idle_count);
	return sprintf(buf, "%lu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
	unsigned long now, idle_time, idle_enter, idle_exit, in_idle;
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned int seq;

@ -109,14 +109,14 @@ static ssize_t show_idle_time(struct device *dev,
		}
	}
	idle_time += in_idle;
	return sprintf(buf, "%llu\n", idle_time >> 12);
	return sprintf(buf, "%lu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

u64 arch_cpu_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long now, idle_enter, idle_exit, in_idle;
	unsigned long now, idle_enter, idle_exit, in_idle;
	unsigned int seq;

	do {

@ -269,7 +269,7 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
	case CPUMF_CTR_SET_MAX:
		/* The counter could not be associated to a counter set */
		return -EINVAL;
	};
	}

	/* Initialize for using the CPU-measurement counter facility */
	if (!atomic_inc_not_zero(&num_events)) {

@ -26,12 +26,10 @@
#include <asm/timex.h>
#include <asm/debug.h>

#include <asm/perf_cpum_cf_diag.h>
#include <asm/hwctrset.h>

#define CF_DIAG_CTRSET_DEF	0xfeef	/* Counter set header mark */
#define CF_DIAG_MIN_INTERVAL	60	/* Minimum counter set read */
					/* interval in seconds */
static unsigned long cf_diag_interval = CF_DIAG_MIN_INTERVAL;
static unsigned int cf_diag_cpu_speed;
static debug_info_t *cf_diag_dbg;

@ -729,7 +727,6 @@ static DEFINE_MUTEX(cf_diag_ctrset_mutex);
static struct cf_diag_ctrset {
	unsigned long ctrset;		/* Bit mask of counter set to read */
	cpumask_t mask;			/* CPU mask to read from */
	time64_t lastread;		/* Epoch counter set last read */
} cf_diag_ctrset;

static void cf_diag_ctrset_clear(void)

@ -866,27 +863,16 @@ static int cf_diag_all_read(unsigned long arg)
{
	struct cf_diag_call_on_cpu_parm p;
	cpumask_var_t mask;
	time64_t now;
	int rc = 0;
	int rc;

	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	now = ktime_get_seconds();
	if (cf_diag_ctrset.lastread + cf_diag_interval > now) {
		debug_sprintf_event(cf_diag_dbg, 5, "%s now %lld "
				    " lastread %lld\n", __func__, now,
				    cf_diag_ctrset.lastread);
		rc = -EAGAIN;
		goto out;
	} else {
		cf_diag_ctrset.lastread = now;
	}

	p.sets = cf_diag_ctrset.ctrset;
	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
	on_each_cpu_mask(mask, cf_diag_cpu_read, &p, 1);
	rc = cf_diag_all_copy(arg, mask);
out:
	free_cpumask_var(mask);
	debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d\n", __func__, rc);
	return rc;

@ -68,10 +68,10 @@ EXPORT_SYMBOL(s390_epoch_delta_notifier);

unsigned char ptff_function_mask[16];

static unsigned long long lpar_offset;
static unsigned long long initial_leap_seconds;
static unsigned long long tod_steering_end;
static long long tod_steering_delta;
static unsigned long lpar_offset;
static unsigned long initial_leap_seconds;
static unsigned long tod_steering_end;
static long tod_steering_delta;

/*
 * Get time offsets with PTFF

@ -96,7 +96,7 @@ void __init time_early_init(void)

	/* get initial leap seconds */
	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
		initial_leap_seconds = (unsigned long long)
		initial_leap_seconds = (unsigned long)
			((long) qui.old_leap * 4096000000L);
}

@ -222,7 +222,7 @@ void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,

static u64 read_tod_clock(struct clocksource *cs)
{
	unsigned long long now, adj;
	unsigned long now, adj;

	preempt_disable(); /* protect from changes to steering parameters */
	now = get_tod_clock();

@ -362,7 +362,7 @@ static inline int check_sync_clock(void)
 * Apply clock delta to the global data structures.
 * This is called once on the CPU that performed the clock sync.
 */
static void clock_sync_global(unsigned long long delta)
static void clock_sync_global(unsigned long delta)
{
	unsigned long now, adj;
	struct ptff_qto qto;

@ -378,7 +378,7 @@ static void clock_sync_global(unsigned long long delta)
		       -(adj >> 15) : (adj >> 15);
	tod_steering_delta += delta;
	if ((abs(tod_steering_delta) >> 48) != 0)
		panic("TOD clock sync offset %lli is too large to drift\n",
		panic("TOD clock sync offset %li is too large to drift\n",
		      tod_steering_delta);
	tod_steering_end = now + (abs(tod_steering_delta) << 15);
	vdso_data->arch_data.tod_steering_end = tod_steering_end;

@ -394,7 +394,7 @@ static void clock_sync_global(unsigned long long delta)
 * Apply clock delta to the per-CPU data structures of this CPU.
 * This is called for each online CPU after the call to clock_sync_global.
 */
static void clock_sync_local(unsigned long long delta)
static void clock_sync_local(unsigned long delta)
{
	/* Add the delta to the clock comparator. */
	if (S390_lowcore.clock_comparator != clock_comparator_max) {

@ -418,7 +418,7 @@ static void __init time_init_wq(void)
struct clock_sync_data {
	atomic_t cpus;
	int in_sync;
	unsigned long long clock_delta;
	unsigned long clock_delta;
};

/*

@ -538,7 +538,7 @@ static int stpinfo_valid(void)
static int stp_sync_clock(void *data)
{
	struct clock_sync_data *sync = data;
	unsigned long long clock_delta, flags;
	u64 clock_delta, flags;
	static int first;
	int rc;

@ -720,8 +720,8 @@ static ssize_t ctn_id_show(struct device *dev,

	mutex_lock(&stp_mutex);
	if (stpinfo_valid())
		ret = sprintf(buf, "%016llx\n",
			      *(unsigned long long *) stp_info.ctnid);
		ret = sprintf(buf, "%016lx\n",
			      *(unsigned long *) stp_info.ctnid);
	mutex_unlock(&stp_mutex);
	return ret;
}

@ -794,7 +794,7 @@ static ssize_t leap_seconds_scheduled_show(struct device *dev,
	if (!stzi.lsoib.p)
		return sprintf(buf, "0,0\n");

	return sprintf(buf, "%llu,%d\n",
	return sprintf(buf, "%lu,%d\n",
		       tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
		       stzi.lsoib.nlso - stzi.lsoib.also);
}

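
One practical consequence visible throughout the time.c hunks above: every sprintf/panic format specifier had to move from %llu/%lli to %lu/%li along with the type change, because -Wformat warns on a mismatch between unsigned long and %llu even when both types are 64 bits wide. Minimal illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned long v = 42;

		/* printf("%llu\n", v); would warn: %llu expects unsigned long long */
		printf("%lu\n", v);	/* matching length modifier */
		return 0;
	}
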
@ -76,8 +76,6 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
			}
			info = info->next;
		}
		if (cpumask_empty(&mask))
			cpumask_copy(&mask, cpumask_of(cpu));
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);

@ -1287,7 +1287,7 @@ static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
			return min_t(u64, sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);

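
The min() to min_t() switch above follows directly from the timex change: sltime stays u64 while tod_to_ns() now returns unsigned long, and the kernel's type-checking min() refuses mixed operand types. min_t(u64, ...) casts both sides first. A sketch of the idiom with a simplified macro (not the kernel's exact definition; uses the GNU statement-expression extension, as the kernel does):

	#include <stdio.h>

	typedef unsigned long long u64;

	/* Simplified min_t(): cast both operands to the named type, then compare. */
	#define min_t(type, x, y) ({	\
		type _x = (x);		\
		type _y = (y);		\
		_x < _y ? _x : _y; })

	int main(void)
	{
		u64 sltime = 500;
		unsigned long ns = 300;

		printf("%llu\n", min_t(u64, sltime, ns));	/* prints 300 */
		return 0;
	}
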
@ -93,7 +93,7 @@ CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_MII=m
CONFIG_SUNLANCE=m
CONFIG_HAPPYMEAL=m
CONFIG_HAPPYMEAL=y
CONFIG_SUNGEM=m
CONFIG_SUNVNET=m
CONFIG_LDMVSW=m

@ -234,9 +234,7 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRC16=m
CONFIG_LIBCRC32C=m
CONFIG_VCC=m
CONFIG_ATA=y
CONFIG_PATA_CMD64X=y
CONFIG_HAPPYMEAL=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_DEVTMPFS=y

@ -8,7 +8,6 @@

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/extable_64.h>
#include <asm/spitfire.h>
#include <asm/adi.h>

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_EXTABLE64_H
#define __ASM_EXTABLE64_H
#ifndef __ASM_EXTABLE_H
#define __ASM_EXTABLE_H
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is

@ -50,16 +50,12 @@ struct thread_struct {
	unsigned long fsr;
	unsigned long fpqdepth;
	struct fpq fpqueue[16];
	unsigned long flags;
	mm_segment_t current_ds;
};

#define SPARC_FLAG_KTHREAD	0x1	/* task is a kernel thread */
#define SPARC_FLAG_UNALIGNED	0x2	/* is allowed to do unaligned accesses */

#define INIT_THREAD  { \
	.flags = SPARC_FLAG_KTHREAD, \
	.current_ds = KERNEL_DS, \
	.kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
}

/* Do necessary setup to start up a newly executed thread. */

@ -118,6 +118,7 @@ struct thread_info {
	.task		= &tsk,			\
	.current_ds	= ASI_P,		\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
	.kregs		= (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
}

/* how to get the thread information struct from C */

@ -1,6 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ___ASM_SPARC_UACCESS_H
#define ___ASM_SPARC_UACCESS_H

#include <asm/extable.h>

#if defined(__sparc__) && defined(__arch64__)
#include <asm/uaccess_64.h>
#else

@ -13,9 +13,6 @@

#include <asm/processor.h>

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

/* Sparc is not segmented, however we need to be able to fool access_ok()
 * when doing system calls from kernel mode legitimately.
 *

@ -40,36 +37,6 @@
#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
#define access_ok(addr, size) __access_ok((unsigned long)(addr), size)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is a special way how to put a range of potentially faulting
 * insns (like twenty ldd/std's with now intervening other instructions)
 * You specify address of first in insn and 0 in fixup and in the next
 * exception_table_entry you specify last potentially faulting insn + 1
 * and in fixup the routine which should handle the fault.
 * That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (ie. index of the faulting instruction in the range).
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise. */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..

@ -252,12 +219,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size)
	unsigned long ret;

	__asm__ __volatile__ (
		".section __ex_table,#alloc\n\t"
		".align 4\n\t"
		".word 1f,3\n\t"
		".previous\n\t"
		"mov %2, %%o1\n"
		"1:\n\t"
		"call __bzero\n\t"
		" mov %1, %%o0\n\t"
		"mov %%o0, %0\n"

@ -10,7 +10,6 @@
#include <linux/string.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm/extable_64.h>

#include <asm/processor.h>

@ -515,7 +515,7 @@ continue_boot:

	/* I want a kernel stack NOW! */
	set	init_thread_union, %g1
	set	(THREAD_SIZE - STACKFRAME_SZ), %g2
	set	(THREAD_SIZE - STACKFRAME_SZ - TRACEREG_SZ), %g2
	add	%g1, %g2, %sp
	mov	0, %fp			/* And for good luck */

@ -706,7 +706,7 @@ tlb_fixup_done:
	wr	%g0, ASI_P, %asi
	mov	1, %g1
	sllx	%g1, THREAD_SHIFT, %g1
	sub	%g1, (STACKFRAME_SZ + STACK_BIAS), %g1
	sub	%g1, (STACKFRAME_SZ + STACK_BIAS + TRACEREG_SZ), %g1
	add	%g6, %g1, %sp

	/* Set per-cpu pointer initially to zero, this makes

@ -216,16 +216,6 @@ void flush_thread(void)
		clear_thread_flag(TIF_USEDFPU);
#endif
	}

	/* This task is no longer a kernel thread. */
	if (current->thread.flags & SPARC_FLAG_KTHREAD) {
		current->thread.flags &= ~SPARC_FLAG_KTHREAD;

		/* We must fixup kregs as well. */
		/* XXX This was not fixed for ti for a while, worked. Unused? */
		current->thread.kregs = (struct pt_regs *)
		    (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
	}
}

static inline struct sparc_stackf __user *

@ -313,7 +303,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
		extern int nwindows;
		unsigned long psr;
		memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
		p->thread.flags |= SPARC_FLAG_KTHREAD;
		p->thread.current_ds = KERNEL_DS;
		ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
		childregs->u_regs[UREG_G1] = sp; /* function */

@ -325,7 +314,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
	}
	memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
	childregs->u_regs[UREG_FP] = sp;
	p->thread.flags &= ~SPARC_FLAG_KTHREAD;
	p->thread.current_ds = USER_DS;
	ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
	ti->kpsr = current->thread.fork_kpsr | PSR_PIL;

@ -266,7 +266,6 @@ static __init void leon_patch(void)
}

struct tt_entry *sparc_ttable;
static struct pt_regs fake_swapper_regs;

/* Called from head_32.S - before we have setup anything
 * in the kernel. Be very careful with what you do here.

@ -363,8 +362,6 @@ void __init setup_arch(char **cmdline_p)
		(*(linux_dbvec->teach_debugger))();
	}

	init_task.thread.kregs = &fake_swapper_regs;

	/* Run-time patch instructions to match the cpu model */
	per_cpu_patch();

@ -165,8 +165,6 @@ extern int root_mountflags;

char reboot_command[COMMAND_LINE_SIZE];

static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };

static void __init per_cpu_patch(void)
{
	struct cpuid_patch_entry *p;

@ -661,8 +659,6 @@ void __init setup_arch(char **cmdline_p)
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
#endif

	task_thread_info(&init_task)->kregs = &fake_swapper_regs;

#ifdef CONFIG_IP_PNP
	if (!ic_set_manually) {
		phandle chosen = prom_finddevice("/chosen");

@ -275,13 +275,12 @@ bool is_no_fault_exception(struct pt_regs *regs)
			asi = (regs->tstate >> 24); /* saved %asi       */
		else
			asi = (insn >> 5);	    /* immediate asi    */
		if ((asi & 0xf2) == ASI_PNF) {
			if (insn & 0x1000000) {    /* op3[5:4]=3       */
				handle_ldf_stq(insn, regs);
				return true;
			} else if (insn & 0x200000) { /* op3[2], stores */
		if ((asi & 0xf6) == ASI_PNF) {
			if (insn & 0x200000)       /* op3[2], stores   */
				return false;
			}
			if (insn & 0x1000000)      /* op3[5:4]=3 (fp)  */
				handle_ldf_stq(insn, regs);
			else
				handle_ld_nf(insn, regs);
			return true;
		}

@ -16,6 +16,7 @@
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/extable.h>

#include <asm/setup.h>

@ -213,10 +214,10 @@ static inline int ok_for_kernel(unsigned int insn)

static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
	unsigned long g2 = regs->u_regs [UREG_G2];
	unsigned long fixup = search_extables_range(regs->pc, &g2);
	const struct exception_table_entry *entry;

	if (!fixup) {
	entry = search_exception_tables(regs->pc);
	if (!entry) {
		unsigned long address = compute_effective_address(regs, insn);
		if(address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");

@ -232,9 +233,8 @@ static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	regs->pc = fixup;
	regs->pc = entry->fixup;
	regs->npc = regs->pc + 4;
	regs->u_regs [UREG_G2] = g2;
}

asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)

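
The unaligned-trap conversion above drops sparc32's custom search_extables_range() (which also passed an instruction index back through %g2) in favour of the generic search_exception_tables(), which maps a faulting PC straight to a fixup PC. A stripped-down model of that lookup, with made-up table contents:

	#include <stdio.h>

	struct exception_table_entry {
		unsigned long insn;	/* address allowed to fault */
		unsigned long fixup;	/* where to resume */
	};

	static const struct exception_table_entry extable[] = {
		{ 0x1000, 0x2000 },	/* hypothetical addresses */
	};

	static const struct exception_table_entry *search(unsigned long pc)
	{
		for (unsigned long i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
			if (extable[i].insn == pc)
				return &extable[i];
		return NULL;
	}

	int main(void)
	{
		const struct exception_table_entry *e = search(0x1000);

		printf("fixup %#lx\n", e ? e->fixup : 0UL);
		return 0;
	}
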
@ -274,103 +274,9 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
|
|||
}
|
||||
}
|
||||
|
||||
static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
|
||||
enum direction dir)
|
||||
{
|
||||
unsigned int reg;
|
||||
int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
|
||||
|
||||
if ((regs->pc | regs->npc) & 3)
|
||||
return 0;
|
||||
|
||||
/* Must access_ok() in all the necessary places. */
|
||||
#define WINREG_ADDR(regnum) \
|
||||
((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
|
||||
|
||||
reg = (insn >> 25) & 0x1f;
|
||||
if (reg >= 16) {
|
||||
if (!access_ok(WINREG_ADDR(reg - 16), size))
|
||||
return -EFAULT;
|
||||
}
|
||||
reg = (insn >> 14) & 0x1f;
|
||||
if (reg >= 16) {
|
||||
if (!access_ok(WINREG_ADDR(reg - 16), size))
|
||||
return -EFAULT;
|
||||
}
|
||||
if (!(insn & 0x2000)) {
|
||||
reg = (insn & 0x1f);
|
||||
if (reg >= 16) {
|
||||
if (!access_ok(WINREG_ADDR(reg - 16), size))
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
#undef WINREG_ADDR
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
|
||||
asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
|
||||
{
|
||||
send_sig_fault(SIGBUS, BUS_ADRALN,
|
||||
(void __user *)safe_compute_effective_address(regs, insn),
|
||||
0, current);
|
||||
}
|
||||
|
||||
asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
|
||||
{
|
||||
enum direction dir;
|
||||
|
||||
if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
|
||||
(((insn >> 30) & 3) != 3))
|
||||
goto kill_user;
|
||||
dir = decode_direction(insn);
|
||||
if(!ok_for_user(regs, insn, dir)) {
|
||||
goto kill_user;
|
||||
} else {
|
||||
int err, size = decode_access_size(insn);
|
||||
unsigned long addr;
|
||||
|
||||
if(floating_point_load_or_store_p(insn)) {
|
||||
printk("User FPU load/store unaligned unsupported.\n");
|
||||
goto kill_user;
|
||||
}
|
||||
|
||||
addr = compute_effective_address(regs, insn);
|
||||
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
|
||||
switch(dir) {
|
||||
case load:
|
||||
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
|
||||
regs),
|
||||
size, (unsigned long *) addr,
|
||||
decode_signedness(insn));
|
||||
break;
|
||||
|
||||
case store:
|
||||
err = do_int_store(((insn>>25)&0x1f), size,
|
||||
(unsigned long *) addr, regs);
|
||||
break;
|
||||
|
||||
case both:
|
||||
/*
|
||||
* This was supported in 2.4. However, we question
|
||||
* the value of SWAP instruction across word boundaries.
|
||||
*/
|
||||
printk("Unaligned SWAP unsupported.\n");
|
||||
err = -EFAULT;
|
||||
break;
|
||||
|
||||
default:
|
||||
unaligned_panic("Impossible user unaligned trap.");
|
||||
goto out;
|
||||
}
|
||||
if (err)
|
||||
goto kill_user;
|
||||
else
|
||||
advance(regs);
|
||||
goto out;
|
||||
}
|
||||
|
||||
kill_user:
|
||||
user_mna_trap_fault(regs, insn);
|
||||
out:
|
||||
;
|
||||
}
|
||||
|
|
|
@ -155,13 +155,6 @@ cpout: retl ! get outta here
|
|||
.text; \
|
||||
.align 4
|
||||
|
||||
#define EXT(start,end) \
|
||||
.section __ex_table,ALLOC; \
|
||||
.align 4; \
|
||||
.word start, 0, end, cc_fault; \
|
||||
.text; \
|
||||
.align 4
|
||||
|
||||
/* This aligned version executes typically in 8.5 superscalar cycles, this
|
||||
* is the best I can do. I say 8.5 because the final add will pair with
|
||||
* the next ldd in the main unrolled loop. Thus the pipe is always full.
|
||||
|
@ -169,20 +162,20 @@ cpout: retl ! get outta here
|
|||
* please check the fixup code below as well.
|
||||
*/
|
||||
#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
|
||||
ldd [src + off + 0x00], t0; \
|
||||
ldd [src + off + 0x08], t2; \
|
||||
EX(ldd [src + off + 0x00], t0); \
|
||||
EX(ldd [src + off + 0x08], t2); \
|
||||
addxcc t0, sum, sum; \
|
||||
ldd [src + off + 0x10], t4; \
|
||||
EX(ldd [src + off + 0x10], t4); \
|
||||
addxcc t1, sum, sum; \
|
||||
ldd [src + off + 0x18], t6; \
|
||||
EX(ldd [src + off + 0x18], t6); \
|
||||
addxcc t2, sum, sum; \
|
||||
std t0, [dst + off + 0x00]; \
|
||||
EX(std t0, [dst + off + 0x00]); \
|
||||
addxcc t3, sum, sum; \
|
||||
std t2, [dst + off + 0x08]; \
|
||||
EX(std t2, [dst + off + 0x08]); \
|
||||
addxcc t4, sum, sum; \
|
||||
std t4, [dst + off + 0x10]; \
|
||||
EX(std t4, [dst + off + 0x10]); \
|
||||
addxcc t5, sum, sum; \
|
||||
std t6, [dst + off + 0x18]; \
|
||||
EX(std t6, [dst + off + 0x18]); \
|
||||
addxcc t6, sum, sum; \
|
||||
addxcc t7, sum, sum;
|
||||
|
||||
|
@ -191,39 +184,39 @@ cpout: retl ! get outta here
|
|||
* Viking MXCC into streaming mode. Ho hum...
|
||||
*/
|
||||
#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
|
||||
ldd [src + off + 0x00], t0; \
|
||||
ldd [src + off + 0x08], t2; \
|
||||
ldd [src + off + 0x10], t4; \
|
||||
ldd [src + off + 0x18], t6; \
|
||||
st t0, [dst + off + 0x00]; \
|
||||
EX(ldd [src + off + 0x00], t0); \
|
||||
EX(ldd [src + off + 0x08], t2); \
|
||||
EX(ldd [src + off + 0x10], t4); \
|
||||
EX(ldd [src + off + 0x18], t6); \
|
||||
EX(st t0, [dst + off + 0x00]); \
|
||||
addxcc t0, sum, sum; \
|
||||
st t1, [dst + off + 0x04]; \
|
||||
EX(st t1, [dst + off + 0x04]); \
|
||||
addxcc t1, sum, sum; \
|
||||
st t2, [dst + off + 0x08]; \
|
||||
EX(st t2, [dst + off + 0x08]); \
|
||||
addxcc t2, sum, sum; \
|
||||
st t3, [dst + off + 0x0c]; \
|
||||
EX(st t3, [dst + off + 0x0c]); \
|
||||
addxcc t3, sum, sum; \
|
||||
st t4, [dst + off + 0x10]; \
|
||||
EX(st t4, [dst + off + 0x10]); \
|
||||
addxcc t4, sum, sum; \
|
||||
st t5, [dst + off + 0x14]; \
|
||||
EX(st t5, [dst + off + 0x14]); \
|
||||
addxcc t5, sum, sum; \
|
||||
st t6, [dst + off + 0x18]; \
|
||||
EX(st t6, [dst + off + 0x18]); \
|
||||
addxcc t6, sum, sum; \
|
||||
st t7, [dst + off + 0x1c]; \
|
||||
EX(st t7, [dst + off + 0x1c]); \
|
||||
addxcc t7, sum, sum;
|
||||
|
||||
/* Yuck, 6 superscalar cycles... */
|
||||
#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
|
||||
ldd [src - off - 0x08], t0; \
|
||||
ldd [src - off - 0x00], t2; \
|
||||
EX(ldd [src - off - 0x08], t0); \
|
||||
EX(ldd [src - off - 0x00], t2); \
|
||||
addxcc t0, sum, sum; \
|
||||
st t0, [dst - off - 0x08]; \
|
||||
EX(st t0, [dst - off - 0x08]); \
|
||||
addxcc t1, sum, sum; \
|
||||
st t1, [dst - off - 0x04]; \
|
||||
EX(st t1, [dst - off - 0x04]); \
|
||||
addxcc t2, sum, sum; \
|
||||
st t2, [dst - off - 0x00]; \
|
||||
EX(st t2, [dst - off - 0x00]); \
|
||||
addxcc t3, sum, sum; \
|
||||
st t3, [dst - off + 0x04];
|
||||
EX(st t3, [dst - off + 0x04]);
|
||||
|
||||
/* Handle the end cruft code out of band for better cache patterns. */
|
||||
cc_end_cruft:
|
||||
|
@ -331,7 +324,6 @@ __csum_partial_copy_sparc_generic:
|
|||
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
|
||||
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
|
||||
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
|
||||
10: EXT(5b, 10b) ! note for exception handling
|
||||
sub %g1, 128, %g1 ! detract from length
|
||||
addx %g0, %g7, %g7 ! add in last carry bit
|
||||
andcc %g1, 0xffffff80, %g0 ! more to csum?
|
||||
|
@ -356,8 +348,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
|
|||
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
|
||||
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
|
||||
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
|
||||
12: EXT(cctbl, 12b) ! note for exception table handling
|
||||
addx %g0, %g7, %g7
|
||||
12: addx %g0, %g7, %g7
|
||||
andcc %o3, 0xf, %g0 ! check for low bits set
|
||||
ccte: bne cc_end_cruft ! something left, handle it out of band
|
||||
andcc %o3, 8, %g0 ! begin checks for that code
|
||||
|
@ -367,7 +358,6 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o
|
|||
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
|
||||
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
|
||||
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
|
||||
11: EXT(ccdbl, 11b) ! note for exception table handling
|
||||
sub %g1, 128, %g1 ! detract from length
|
||||
addx %g0, %g7, %g7 ! add in last carry bit
|
||||
andcc %g1, 0xffffff80, %g0 ! more to csum?
|
||||
|
|
|
@ -21,98 +21,134 @@
|
|||
/* Work around cpp -rob */
|
||||
#define ALLOC #alloc
|
||||
#define EXECINSTR #execinstr
|
||||
|
||||
#define EX_ENTRY(l1, l2) \
|
||||
.section __ex_table,ALLOC; \
|
||||
.align 4; \
|
||||
.word l1, l2; \
|
||||
.text;
|
||||
|
||||
#define EX(x,y,a,b) \
|
||||
98: x,y; \
|
||||
.section .fixup,ALLOC,EXECINSTR; \
|
||||
.align 4; \
|
||||
99: ba fixupretl; \
|
||||
a, b, %g3; \
|
||||
.section __ex_table,ALLOC; \
|
||||
.align 4; \
|
||||
.word 98b, 99b; \
|
||||
.text; \
|
||||
.align 4
|
||||
99: retl; \
|
||||
a, b, %o0; \
|
||||
EX_ENTRY(98b, 99b)
|
||||
|
||||
#define EX2(x,y,c,d,e,a,b) \
|
||||
98: x,y; \
|
||||
.section .fixup,ALLOC,EXECINSTR; \
|
||||
.align 4; \
|
||||
99: c, d, e; \
|
||||
ba fixupretl; \
|
||||
a, b, %g3; \
|
||||
.section __ex_table,ALLOC; \
|
||||
.align 4; \
|
||||
.word 98b, 99b; \
|
||||
.text; \
|
||||
.align 4
|
||||
retl; \
|
||||
a, b, %o0; \
|
||||
EX_ENTRY(98b, 99b)
|
||||
|
||||
#define EXO2(x,y) \
|
||||
98: x, y; \
|
||||
.section __ex_table,ALLOC; \
|
||||
.align 4; \
|
||||
.word 98b, 97f; \
|
||||
.text; \
|
||||
.align 4
|
||||
EX_ENTRY(98b, 97f)
|
||||
|
||||
#define EXT(start,end,handler) \
|
||||
.section __ex_table,ALLOC; \
|
||||
.align 4; \
|
||||
.word start, 0, end, handler; \
|
||||
.text; \
|
||||
.align 4
|
||||
#define LD(insn, src, offset, reg, label) \
|
||||
98: insn [%src + (offset)], %reg; \
|
||||
.section .fixup,ALLOC,EXECINSTR; \
|
||||
99: ba label; \
|
||||
mov offset, %g5; \
|
||||
EX_ENTRY(98b, 99b)
|
||||
|
||||
/* Please do not change following macros unless you change logic used
|
||||
* in .fixup at the end of this file as well
|
||||
*/
|
||||
#define ST(insn, dst, offset, reg, label) \
|
||||
98: insn %reg, [%dst + (offset)]; \
|
||||
.section .fixup,ALLOC,EXECINSTR; \
|
||||
99: ba label; \
|
||||
mov offset, %g5; \
|
||||
EX_ENTRY(98b, 99b)
|
||||
|
||||
/* Both these macros have to start with exactly the same insn */
|
||||
/* left: g7 + (g1 % 128) - offset */
|
||||
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
|
||||
ldd [%src + (offset) + 0x00], %t0; \
|
||||
ldd [%src + (offset) + 0x08], %t2; \
|
||||
ldd [%src + (offset) + 0x10], %t4; \
|
||||
ldd [%src + (offset) + 0x18], %t6; \
|
||||
st %t0, [%dst + (offset) + 0x00]; \
|
||||
st %t1, [%dst + (offset) + 0x04]; \
|
||||
st %t2, [%dst + (offset) + 0x08]; \
|
||||
st %t3, [%dst + (offset) + 0x0c]; \
|
||||
st %t4, [%dst + (offset) + 0x10]; \
|
||||
st %t5, [%dst + (offset) + 0x14]; \
|
||||
st %t6, [%dst + (offset) + 0x18]; \
|
||||
st %t7, [%dst + (offset) + 0x1c];
|
||||
LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \
|
||||
LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \
|
||||
LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \
|
||||
LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
|
||||
ST(st, dst, offset + 0x00, t0, bigchunk_fault) \
|
||||
ST(st, dst, offset + 0x04, t1, bigchunk_fault) \
|
||||
ST(st, dst, offset + 0x08, t2, bigchunk_fault) \
|
||||
ST(st, dst, offset + 0x0c, t3, bigchunk_fault) \
|
||||
ST(st, dst, offset + 0x10, t4, bigchunk_fault) \
|
||||
ST(st, dst, offset + 0x14, t5, bigchunk_fault) \
|
||||
ST(st, dst, offset + 0x18, t6, bigchunk_fault) \
|
||||
ST(st, dst, offset + 0x1c, t7, bigchunk_fault)
|
||||
|
||||
/* left: g7 + (g1 % 128) - offset */
|
||||
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
|
||||
ldd [%src + (offset) + 0x00], %t0; \
|
||||
ldd [%src + (offset) + 0x08], %t2; \
|
||||
ldd [%src + (offset) + 0x10], %t4; \
|
||||
ldd [%src + (offset) + 0x18], %t6; \
|
||||
std %t0, [%dst + (offset) + 0x00]; \
|
||||
std %t2, [%dst + (offset) + 0x08]; \
|
||||
std %t4, [%dst + (offset) + 0x10]; \
|
||||
std %t6, [%dst + (offset) + 0x18];
|
||||
LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \
|
||||
LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \
|
||||
LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \
|
||||
LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
|
||||
ST(std, dst, offset + 0x00, t0, bigchunk_fault) \
|
||||
ST(std, dst, offset + 0x08, t2, bigchunk_fault) \
|
||||
ST(std, dst, offset + 0x10, t4, bigchunk_fault) \
|
||||
ST(std, dst, offset + 0x18, t6, bigchunk_fault)
|
||||
|
||||
.section .fixup,#alloc,#execinstr
|
||||
bigchunk_fault:
|
||||
sub %g7, %g5, %o0
|
||||
and %g1, 127, %g1
|
||||
retl
|
||||
add %o0, %g1, %o0
|
||||
|
||||
/* left: offset + 16 + (g1 % 16) */
|
||||
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
|
||||
ldd [%src - (offset) - 0x10], %t0; \
|
||||
ldd [%src - (offset) - 0x08], %t2; \
|
||||
st %t0, [%dst - (offset) - 0x10]; \
|
||||
st %t1, [%dst - (offset) - 0x0c]; \
|
||||
st %t2, [%dst - (offset) - 0x08]; \
|
||||
st %t3, [%dst - (offset) - 0x04];
|
||||
LD(ldd, src, -(offset + 0x10), t0, lastchunk_fault) \
|
||||
LD(ldd, src, -(offset + 0x08), t2, lastchunk_fault) \
|
||||
ST(st, dst, -(offset + 0x10), t0, lastchunk_fault) \
|
||||
ST(st, dst, -(offset + 0x0c), t1, lastchunk_fault) \
|
||||
ST(st, dst, -(offset + 0x08), t2, lastchunk_fault) \
|
||||
ST(st, dst, -(offset + 0x04), t3, lastchunk_fault)
|
||||
|
||||
.section .fixup,#alloc,#execinstr
|
||||
lastchunk_fault:
|
||||
and %g1, 15, %g1
|
||||
retl
|
||||
sub %g1, %g5, %o0
|
||||
|
||||
/* left: o3 + (o2 % 16) - offset */
|
||||
#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
|
||||
lduh [%src + (offset) + 0x00], %t0; \
|
||||
lduh [%src + (offset) + 0x02], %t1; \
|
||||
lduh [%src + (offset) + 0x04], %t2; \
|
||||
lduh [%src + (offset) + 0x06], %t3; \
|
||||
sth %t0, [%dst + (offset) + 0x00]; \
|
||||
sth %t1, [%dst + (offset) + 0x02]; \
|
||||
sth %t2, [%dst + (offset) + 0x04]; \
|
||||
sth %t3, [%dst + (offset) + 0x06];
|
||||
LD(lduh, src, offset + 0x00, t0, halfchunk_fault) \
|
||||
LD(lduh, src, offset + 0x02, t1, halfchunk_fault) \
|
||||
LD(lduh, src, offset + 0x04, t2, halfchunk_fault) \
|
||||
LD(lduh, src, offset + 0x06, t3, halfchunk_fault) \
|
||||
ST(sth, dst, offset + 0x00, t0, halfchunk_fault) \
|
||||
ST(sth, dst, offset + 0x02, t1, halfchunk_fault) \
|
||||
ST(sth, dst, offset + 0x04, t2, halfchunk_fault) \
|
||||
ST(sth, dst, offset + 0x06, t3, halfchunk_fault)
|
||||
|
||||
/* left: o3 + (o2 % 16) + offset + 2 */
|
||||
#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
|
||||
ldub [%src - (offset) - 0x02], %t0; \
|
||||
ldub [%src - (offset) - 0x01], %t1; \
|
||||
stb %t0, [%dst - (offset) - 0x02]; \
|
||||
stb %t1, [%dst - (offset) - 0x01];
|
||||
LD(ldub, src, -(offset + 0x02), t0, halfchunk_fault) \
|
||||
LD(ldub, src, -(offset + 0x01), t1, halfchunk_fault) \
|
||||
ST(stb, dst, -(offset + 0x02), t0, halfchunk_fault) \
|
||||
ST(stb, dst, -(offset + 0x01), t1, halfchunk_fault)
|
||||
|
||||
.section .fixup,#alloc,#execinstr
|
||||
halfchunk_fault:
|
||||
and %o2, 15, %o2
|
||||
sub %o3, %g5, %o3
|
||||
retl
|
||||
add %o2, %o3, %o0
|
||||
|
||||
/* left: offset + 2 + (o2 % 2) */
|
||||
#define MOVE_LAST_SHORTCHUNK(src, dst, offset, t0, t1) \
|
||||
LD(ldub, src, -(offset + 0x02), t0, last_shortchunk_fault) \
|
||||
LD(ldub, src, -(offset + 0x01), t1, last_shortchunk_fault) \
|
||||
ST(stb, dst, -(offset + 0x02), t0, last_shortchunk_fault) \
|
||||
ST(stb, dst, -(offset + 0x01), t1, last_shortchunk_fault)
|
||||
|
||||
.section .fixup,#alloc,#execinstr
|
||||
last_shortchunk_fault:
|
||||
and %o2, 1, %o2
|
||||
retl
|
||||
sub %o2, %g5, %o0
|
||||
|
||||
.text
|
||||
.align 4
|
||||
|
@ -182,8 +218,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */
|
|||
MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
|
||||
MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
|
||||
MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
|
||||
80:
|
||||
EXT(5b, 80b, 50f)
|
||||
subcc %g7, 128, %g7
|
||||
add %o1, 128, %o1
|
||||
bne 5b
|
||||
|
@ -201,7 +235,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */
|
|||
jmpl %o5 + %lo(copy_user_table_end), %g0
|
||||
add %o0, %g7, %o0
|
||||
|
||||
copy_user_table:
|
||||
MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
|
||||
MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
|
||||
MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
|
||||
|
@ -210,7 +243,6 @@ copy_user_table:
|
|||
MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
|
||||
MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
|
||||
copy_user_table_end:
|
||||
EXT(copy_user_table, copy_user_table_end, 51f)
|
||||
be copy_user_last7
|
||||
andcc %g1, 4, %g0
|
||||
|
||||
|
@ -250,8 +282,6 @@ ldd_std:
|
|||
MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
|
||||
MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
|
||||
MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
|
||||
81:
|
||||
EXT(ldd_std, 81b, 52f)
|
||||
subcc %g7, 128, %g7
|
||||
add %o1, 128, %o1
|
||||
bne ldd_std
|
||||
|
@ -290,8 +320,6 @@ cannot_optimize:
|
|||
10:
|
||||
MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
|
||||
MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
|
||||
82:
|
||||
EXT(10b, 82b, 53f)
|
||||
subcc %o3, 0x10, %o3
|
||||
add %o1, 0x10, %o1
|
||||
bne 10b
|
||||
|
@ -308,8 +336,6 @@ byte_chunk:
|
|||
MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
|
||||
MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
|
||||
MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
|
||||
83:
|
||||
EXT(byte_chunk, 83b, 54f)
|
||||
subcc %o3, 0x10, %o3
|
||||
add %o1, 0x10, %o1
|
||||
bne byte_chunk
|
||||
|
@ -325,16 +351,14 @@ short_end:
|
|||
add %o1, %o3, %o1
|
||||
jmpl %o5 + %lo(short_table_end), %g0
|
||||
andcc %o2, 1, %g0
|
||||
84:
|
||||
MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
|
||||
MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
|
||||
MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
|
||||
MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
|
||||
MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
|
||||
MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
|
||||
MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
|
||||
MOVE_LAST_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
|
||||
MOVE_LAST_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
|
||||
MOVE_LAST_SHORTCHUNK(o1, o0, 0x08, g2, g3)
|
||||
MOVE_LAST_SHORTCHUNK(o1, o0, 0x06, g2, g3)
|
||||
MOVE_LAST_SHORTCHUNK(o1, o0, 0x04, g2, g3)
|
||||
MOVE_LAST_SHORTCHUNK(o1, o0, 0x02, g2, g3)
|
||||
MOVE_LAST_SHORTCHUNK(o1, o0, 0x00, g2, g3)
|
||||
short_table_end:
|
||||
EXT(84b, short_table_end, 55f)
|
||||
be 1f
|
||||
nop
|
||||
EX(ldub [%o1], %g2, add %g0, 1)
|
||||
|
@ -363,123 +387,8 @@ short_aligned_end:
|
|||
.section .fixup,#alloc,#execinstr
|
||||
.align 4
|
||||
97:
|
||||
mov %o2, %g3
|
||||
fixupretl:
|
||||
retl
|
||||
mov %g3, %o0
|
||||
|
||||
/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
|
||||
50:
|
||||
/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
|
||||
* happens. This is derived from the amount ldd reads, st stores, etc.
|
||||
* x = g2 % 12;
|
||||
* g3 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? 0 : (x - 4) * 4);
|
||||
* o0 += (g2 / 12) * 32;
|
||||
*/
	cmp %g2, 12
	add %o0, %g7, %o0
	bcs 1f
	cmp %g2, 24
	bcs 2f
	cmp %g2, 36
	bcs 3f
	nop
	sub %g2, 12, %g2
	sub %g7, 32, %g7
3:	sub %g2, 12, %g2
	sub %g7, 32, %g7
2:	sub %g2, 12, %g2
	sub %g7, 32, %g7
1:	cmp %g2, 4
	bcs,a 60f
	clr %g2
	sub %g2, 4, %g2
	sll %g2, 2, %g2
60:	and %g1, 0x7f, %g3
	sub %o0, %g7, %o0
	add %g3, %g7, %g3
	ba fixupretl
	sub %g3, %g2, %g3
51:
	/* i = 41 - g2; j = i % 6;
	 * g3 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : 16;
	 * o0 -= (i / 6) * 16 + 16;
	 */
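	/* Added annotation (illustrative, not in the original source):
	 * with the last term read as ((j < 4) ? (j + 1) * 4 : 16), a
	 * hypothetical g2 = 29 gives i = 12 and j = 0, hence
	 * g3 = (g1 & 15) + 32 + 4 and o0 -= 48.
	 */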
	neg %g2
	and %g1, 0xf, %g1
	add %g2, 41, %g2
	add %o0, %g1, %o0
1:	cmp %g2, 6
	bcs,a 2f
	cmp %g2, 4
	add %g1, 16, %g1
	b 1b
	sub %g2, 6, %g2
2:	bcc,a 2f
	mov 16, %g2
	inc %g2
	sll %g2, 2, %g2
2:	add %g1, %g2, %g3
	ba fixupretl
	sub %o0, %g3, %o0
52:
	/* g3 = g1 + g7 - (g2 / 8) * 32 + (g2 & 4) ? (g2 & 3) * 8 : 0;
	   o0 += (g2 / 8) * 32 */
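	/* Added annotation (illustrative, not in the original source): with
	 * grouping ((g2 / 8) * 32 + ((g2 & 4) ? (g2 & 3) * 8 : 0)) and a
	 * hypothetical g2 = 13: one full 32-byte group plus one 8-byte store
	 * completed, so g3 = g1 + g7 - 40 and o0 += 32.
	 */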
	andn %g2, 7, %g4
	add %o0, %g7, %o0
	andcc %g2, 4, %g0
	and %g2, 3, %g2
	sll %g4, 2, %g4
	sll %g2, 3, %g2
	bne 60b
	sub %g7, %g4, %g7
	ba 60b
	clr %g2
53:
	/* g3 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 4) ? (g2 & 3) * 2 : 0;
	   o0 += (g2 & 8) */
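	/* Added annotation (illustrative, not in the original source): for a
	 * hypothetical g2 = 14, g2 & 8 = 8 and (g2 & 4) selects
	 * (g2 & 3) * 2 = 4, so g3 = o3 + (o2 & 15) - 12 and o0 += 8.
	 */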
	and %g2, 3, %g4
	andcc %g2, 4, %g0
	and %g2, 8, %g2
	sll %g4, 1, %g4
	be 1f
	add %o0, %g2, %o0
	add %g2, %g4, %g2
1:	and %o2, 0xf, %g3
	add %g3, %o3, %g3
	ba fixupretl
	sub %g3, %g2, %g3
54:
	/* g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 2) ? (g2 & 1) : 0;
	   o0 += (g2 / 4) * 2 */
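	/* Added annotation (illustrative, not in the original source): for a
	 * hypothetical g2 = 7, (g2 / 4) * 2 = 2 and (g2 & 2) selects
	 * (g2 & 1) = 1, so g3 = o3 + (o2 & 15) - 3 and o0 += 2.
	 */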
	srl %g2, 2, %o4
	and %g2, 1, %o5
	srl %g2, 1, %g2
	add %o4, %o4, %o4
	and %o5, %g2, %o5
	and %o2, 0xf, %o2
	add %o0, %o4, %o0
	sub %o3, %o5, %o3
	sub %o2, %o4, %o2
	ba fixupretl
	add %o2, %o3, %g3
55:
	/* i = 27 - g2;
	   g3 = (o2 & 1) + i / 4 * 2 + !(i & 3);
	   o0 -= i / 4 * 2 + 1 */
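	/* Added annotation (illustrative, not in the original source): for a
	 * hypothetical g2 = 19, i = 8, i / 4 * 2 = 4 and !(i & 3) = 1, so
	 * g3 = (o2 & 1) + 5 and o0 -= 5.
	 */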
	neg %g2
	and %o2, 1, %o2
	add %g2, 27, %g2
	srl %g2, 2, %o5
	andcc %g2, 3, %g0
	mov 1, %g2
	add %o5, %o5, %o5
	be,a 1f
	clr %g2
1:	add %g2, %o5, %g3
	sub %o0, %g3, %o0
	ba fixupretl
	add %g3, %o2, %g3
	mov %o2, %o0

	.globl __copy_user_end
__copy_user_end:

@@ -19,7 +19,7 @@
98:	x,y; \
	.section .fixup,ALLOC,EXECINSTR; \
	.align 4; \
99:	ba 30f; \
99:	retl; \
	a, b, %o0; \
	.section __ex_table,ALLOC; \
	.align 4; \

@@ -27,35 +27,44 @@
	.text; \
	.align 4

#define EXT(start,end,handler) \
#define STORE(source, base, offset, n) \
98:	std source, [base + offset + n]; \
	.section .fixup,ALLOC,EXECINSTR; \
	.align 4; \
99:	ba 30f; \
	sub %o3, n - offset, %o3; \
	.section __ex_table,ALLOC; \
	.align 4; \
	.word start, 0, end, handler; \
	.word 98b, 99b; \
	.text; \
	.align 4
	.align 4;

#define STORE_LAST(source, base, offset, n) \
	EX(std source, [base - offset - n], \
	add %o1, offset + n);

/* Please don't change these macros, unless you change the logic
 * in the .fixup section below as well.
 * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
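/* Added annotation (not in the original source): each std is an 8-byte
 * doubleword store, so the eight stores per block cover offsets
 * 0x00-0x3f, i.e. 64 bytes per invocation; the main loop below expands
 * the macro twice per iteration to clear 128 bytes.
 */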
#define ZERO_BIG_BLOCK(base, offset, source) \
	std source, [base + offset + 0x00]; \
	std source, [base + offset + 0x08]; \
	std source, [base + offset + 0x10]; \
	std source, [base + offset + 0x18]; \
	std source, [base + offset + 0x20]; \
	std source, [base + offset + 0x28]; \
	std source, [base + offset + 0x30]; \
	std source, [base + offset + 0x38];
	STORE(source, base, offset, 0x00); \
	STORE(source, base, offset, 0x08); \
	STORE(source, base, offset, 0x10); \
	STORE(source, base, offset, 0x18); \
	STORE(source, base, offset, 0x20); \
	STORE(source, base, offset, 0x28); \
	STORE(source, base, offset, 0x30); \
	STORE(source, base, offset, 0x38);

#define ZERO_LAST_BLOCKS(base, offset, source) \
	std source, [base - offset - 0x38]; \
	std source, [base - offset - 0x30]; \
	std source, [base - offset - 0x28]; \
	std source, [base - offset - 0x20]; \
	std source, [base - offset - 0x18]; \
	std source, [base - offset - 0x10]; \
	std source, [base - offset - 0x08]; \
	std source, [base - offset - 0x00];
	STORE_LAST(source, base, offset, 0x38); \
	STORE_LAST(source, base, offset, 0x30); \
	STORE_LAST(source, base, offset, 0x28); \
	STORE_LAST(source, base, offset, 0x20); \
	STORE_LAST(source, base, offset, 0x18); \
	STORE_LAST(source, base, offset, 0x10); \
	STORE_LAST(source, base, offset, 0x08); \
	STORE_LAST(source, base, offset, 0x00);

	.text
	.align 4

@@ -68,8 +77,6 @@ __bzero_begin:
	.globl memset
	EXPORT_SYMBOL(__bzero)
	EXPORT_SYMBOL(memset)
	.globl __memset_start, __memset_end
__memset_start:
memset:
	mov %o0, %g1
	mov 1, %g4

@@ -122,8 +129,6 @@ __bzero:
	ZERO_BIG_BLOCK(%o0, 0x00, %g2)
	subcc %o3, 128, %o3
	ZERO_BIG_BLOCK(%o0, 0x40, %g2)
11:
	EXT(10b, 11b, 20f)
	bne 10b
	add %o0, 128, %o0

@@ -138,11 +143,9 @@ __bzero:
	jmp %o4
	add %o0, %o2, %o0

12:
	ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
	ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
13:
	EXT(12b, 13b, 21f)
	be 8f
	andcc %o1, 4, %g0

@@ -182,37 +185,13 @@ __bzero:
5:
	retl
	clr %o0
__memset_end:

	.section .fixup,#alloc,#execinstr
	.align 4
20:
	cmp %g2, 8
	bleu 1f
	and %o1, 0x7f, %o1
	sub %g2, 9, %g2
	add %o3, 64, %o3
1:
	sll %g2, 3, %g2
	add %o3, %o1, %o0
	b 30f
	sub %o0, %g2, %o0
21:
	mov 8, %o0
	and %o1, 7, %o1
	sub %o0, %g2, %o0
	sll %o0, 3, %o0
	b 30f
	add %o0, %o1, %o0
30:
	/* %o4 is faulting address, %o5 is %pc where fault occurred */
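	/* Added annotation (assumption, not in the original source): after
	 * the save, the caller's %o4/%o5 are visible as %i4/%i5 and the
	 * delay slot supplies the final argument, so the call is effectively
	 * lookup_fault(pc = %i5, ret_pc = %i7, address = %i4).
	 */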
	save %sp, -104, %sp
	mov %i5, %o0
	mov %i7, %o1
	call lookup_fault
	mov %i4, %o2
	ret
	restore
	and %o1, 0x7f, %o1
	retl
	add %o3, %o1, %o0

	.globl __bzero_end
__bzero_end: